/*	$NetBSD: arm32_kvminit.c,v 1.70 2025/12/19 13:03:51 nia Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
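
/*
 * Bootstrap of the kernel virtual address space for 32-bit ARM:
 * arm32_bootmem_init() records the RAM layout handed over by the
 * platform start code, and arm32_kernel_vm_init() builds the initial
 * L1/L2 page tables, stacks and message buffer and switches to them.
 */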

#include "opt_arm_debug.h"
#include "opt_arm_start.h"
#include "opt_efi.h"
#include "opt_fdt.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.70 2025/12/19 13:03:51 nia Exp $");

#include <sys/param.h>

#include <sys/asan.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/arm32/machdep.h>
#include <arm/bootconfig.h>
#include <arm/db_machdep.h>
#include <arm/locore.h>
#include <arm/undefined.h>

#if defined(FDT)
#include <arch/evbarm/fdt/platform.h>
#include <arm/fdt/arm_fdtvar.h>
#include <dev/fdt/fdt_memory.h>
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP
#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack
#endif
#endif

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

#if defined(__HAVE_GENERIC_START)
#if defined(KERNEL_BASE_VOFFSET)
#error KERNEL_BASE_VOFFSET should not be defined with __HAVE_GENERIC_START
#endif
#endif

#if defined(EFI_RUNTIME)
#if !defined(ARM_MMU_EXTENDED)
#error EFI_RUNTIME is only supported with ARM_MMU_EXTENDED
#endif
#endif

struct bootmem_info bootmem_info;

extern void *msgbufaddr;
paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */

#ifdef KASAN
vaddr_t kasan_kernelstart;
vaddr_t kasan_kernelsize;

#define KERNEL_L2PT_KASAN_NUM	howmany(VM_KERNEL_KASAN_SIZE, L2_S_SEGSIZE)
bool kasan_l2pts_created __attribute__((__section__(".data"))) = false;
pv_addr_t kasan_l2pt[KERNEL_L2PT_KASAN_NUM];
#else
#define KERNEL_L2PT_KASAN_NUM	0
#endif

u_long kern_vtopdiff __attribute__((__section__(".data")));

void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

	/*
	 * FDT/generic start fills in kern_vtopdiff early
	 */
#if defined(__HAVE_GENERIC_START)
	extern char KERNEL_BASE_virt[];
	extern char const __stop__init_memory[];

	VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff);

	vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt);
	vaddr_t kendva = round_page((vaddr_t)__stop__init_memory);

	kernelstart = KERN_VTOPHYS(kstartva);

	VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart);
#else
	vaddr_t kendva = round_page((vaddr_t)_end);

#if defined(KERNEL_BASE_VOFFSET)
	kern_vtopdiff = KERNEL_BASE_VOFFSET;
#else
	KASSERT(memstart == kernelstart);
	kern_vtopdiff = KERNEL_BASE + memstart;
#endif
#endif
	paddr_t kernelend = KERN_VTOPHYS(kendva);

	VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__,
	    memstart, memsize);
	VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__,
	    kernelstart, kernelend);

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
#ifndef ARM_HAS_LPAE
	if (physical_end == 0) {
		physical_end = -PAGE_SIZE;
		memsize -= PAGE_SIZE;
		bmi->bmi_end -= PAGE_SIZE;
		VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n",
		    __func__);
	}
#endif
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */

	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = kernelend;

#if defined(FDT)
	fdt_memory_remove_range(bmi->bmi_kernelstart,
	    bmi->bmi_kernelend - bmi->bmi_kernelstart);
#endif

	VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart,
	    kernelend);

#if 0
	// XXX Makes RPI abort
	KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0);
#endif
	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
	VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
		pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
		VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}

static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#if 0
		VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}
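
/*
 * Insert "pv" into the VA-sorted bmi_chunks list, merging it into an
 * adjacent chunk when the addresses and attributes line up; otherwise
 * a spare pv_addr_t is taken from bmi_freechunks to hold it.
 */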
static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
			VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
				VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;

	VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list)) {
		VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
	} else {
		VPRINTF("at tail\n");
	}
}

static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
    int prot, int cache, bool zero_p)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	KASSERT(npages > 0);

	/*
	 * If we haven't allocated the kernel L1 page table and we are aligned
	 * at a L1 table boundary, alloc the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		VPRINTF(" l1pt");

		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l1pt);
#if defined(EFI_RUNTIME)
		valloc_pages(bmi, &efirt_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &efirt_l1pt);
#endif
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our current first level translation table.
	 */
	KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);

#if defined(FDT)
	fdt_memory_remove_range(free_pv->pv_pa, nbytes);
#endif
	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	if (zero_p)
		memset((void *)pv->pv_va, 0, nbytes);
}
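
/*
 * Build the bootstrap kernel address space: allocate the fixed L1/L2
 * page tables, per-mode stacks and the message buffer from the free
 * memory recorded by arm32_bootmem_init(), map the kernel text/data
 * (and, if requested, all of RAM), then switch to the new tables.
 */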
void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
    const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max;
#else
	const size_t cpu_num = 1;
#endif

#ifdef ARM_HAS_VBAR
	const bool map_vectors_p = false;
#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
	const bool map_vectors_p = vectors == ARM_VECTORS_HIGH
	    || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0;
#else
	const bool map_vectors_p = true;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	KASSERT(mapallmem_p);
#ifdef ARM_MMU_EXTENDED
	/*
	 * The direct map VA space ends at the start of the kernel VM space.
	 */
	pmap_directlimit = kernel_vm_base;
#else
	KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
#endif /* ARM_MMU_EXTENDED */
#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */

	/*
	 * Calculate the number of L2 pages needed for mapping the
	 * kernel + data + stuff.  Assume 2 L2 pages for kernel, 1 for vectors,
	 * and 1 for IO
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_KASAN_NUM;
	if (map_vectors_p) {
		kernel_size += PAGE_SIZE;	/* L2PT for VECTORS */
	}
	if (iovbase) {
		kernel_size += PAGE_SIZE;	/* L2PT for IO */
	}
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
	if (!mapallmem_p) {
		kernel_size += PAGE_SIZE
		    * howmany(kernel_size, L2_S_SEGSIZE);
	}
	kernel_size = round_page(kernel_size);

	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    howmany(kernel_size, L2_S_SEGSIZE);

	VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

	VPRINTF("%s: allocating page tables for", __func__);
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

#if defined(EFI_RUNTIME)
	efirt_l1pt.pv_pa = 0;
	efirt_l1pt.pv_va = 0;
#endif
	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	if (map_vectors_p) {
		/*
		 * First allocate L2 page for the vectors.
		 */
		VPRINTF(" vector");
		valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_vector_l2pt);
	}

	/*
	 * Now allocate L2 pages for the kernel
	 */
	VPRINTF(" kernel");
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
	 */
	VPRINTF(" vm");
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

#ifdef KASAN
	/*
	 * Now allocate L2 pages for the KASAN shadow map l2pt VA space.
	 */
	VPRINTF(" kasan");
	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; ++idx) {
		valloc_pages(bmi, &kasan_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kasan_l2pt[idx]);
	}

#endif
	/*
	 * If someone wanted a L2 page for I/O, allocate it now.
	 */
	if (iovbase) {
		VPRINTF(" io");
		valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

	VPRINTF("%s: allocating stacks\n", __func__);

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,	/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,		/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer from the end of memory. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, false);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
	msgbufaddr = (void *)msgbuf.pv_va;

#ifdef KASAN
	kasan_kernelstart = KERNEL_BASE;
	kasan_kernelsize = (msgbuf.pv_va + round_page(MSGBUFSIZE)) - KERNEL_BASE;
#endif

	if (map_vectors_p) {
		/*
		 * Allocate a page for the system vector page.
		 * This page will just contain the system vectors and can be
		 * shared by all processes.
		 */
		VPRINTF(" vector");

		valloc_pages(bmi, &systempage, 1,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
		    PTE_CACHE, true);
	}
	systempage.pv_va = vectors;

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ | VM_PROT_WRITE, 0, true);
#endif

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

	VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n",
	    kernel_l1pt.pv_va, kernel_l1pt.pv_pa);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	if (map_vectors_p) {
		/* Map the L2 page tables in the L1 page table */
		const vaddr_t va = systempage.pv_va & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_vector_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_vector_l2pt.pv_va,
		    bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va, "(vectors)");
	}

	/*
	 * This enforces an alignment requirement of L2_S_SEGSIZE for kernel
	 * start PA
	 */
	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE);

	VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__,
	    kernel_base, KERNEL_L2PT_KERNEL_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		const vaddr_t va = kernel_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kernel_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
		    va, "(kernel)");
	}

	VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__,
	    kernel_vm_base, KERNEL_L2PT_VMDATA_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		const vaddr_t va = kernel_vm_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &vmdata_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    va, "(vm)");
	}
	if (iovbase) {
		const vaddr_t va = iovbase & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_io_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    va, "(io)");
	}

#ifdef KASAN
	VPRINTF("%s: kasan_shadow_base %x KERNEL_L2PT_KASAN_NUM %d\n", __func__,
	    VM_KERNEL_KASAN_BASE, KERNEL_L2PT_KASAN_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; idx++) {
		const vaddr_t va = VM_KERNEL_KASAN_BASE + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kasan_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kasan_l2pt[idx].pv_va, kasan_l2pt[idx].pv_pa,
		    va, "(kasan)");
	}
	kasan_l2pts_created = true;
#endif

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);

	// This could be done earlier and then the kernel data and pages
	// allocated above would get merged (concatenated)

	VPRINTF("Mapping kernel\n");

	extern char etext[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE;
	text.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);

	add_pages(bmi, &data);

	VPRINTF("Listing Chunks\n");

	pv_addr_t *lpv;
	SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
		VPRINTF("%s: pv %p: chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
		    lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
	}
	VPRINTF("\nMapping Chunks\n");

	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va);
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = KERNEL_BASE;
		cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va);
		cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to encompass the
				 * hole, and if so do it and retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
					continue;
				}

				/*
				 * We couldn't, so emit the current chunk.
				 */
				VPRINTF("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
				pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
				    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * Set the current chunk to the hole and try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate so emit the current one
		 * and use the new pv as the current pv.
		 */
		VPRINTF("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}

	/*
	 * If we are mapping all of memory, let's map the rest of memory.
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
			KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base,
			    "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size,
			    kernel_vm_base);
			VPRINTF("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	/*
	 * The amount we can direct map is limited by the start of the
	 * virtual part of the kernel address space.  Don't overrun
	 * into it.
	 */
	if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) {
		cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va;
	}

	/*
	 * Now we map the final chunk.
	 */
	VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel
	 */
	if (map_vectors_p) {
		/* Map the vector page. */
		pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, PTE_CACHE);
	}

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at same address in first level page
	 * table so that we can continue to use console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

	/* Tell the user about where all the bits and pieces live. */
	VPRINTF("%22s Physical Virtual Num\n", " ");
	VPRINTF("%22s Starting Ending Starting Ending Pages\n", " ");

#ifdef VERBOSE_INIT_ARM
	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx %zu\n";
#endif

#if 0
	// XXX Doesn't make sense if kernel not at bottom of RAM
	VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1),
	    (int)physmem);
#endif
	VPRINTF(mem_fmt, "text section",
	    text.pv_pa, text.pv_pa + text.pv_size - 1,
	    text.pv_va, text.pv_va + text.pv_size - 1,
	    (int)(text.pv_size / PAGE_SIZE));
	VPRINTF(mem_fmt, "data section",
	    KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
		- trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "bss section",
	    KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
		- trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
#if defined(EFI_RUNTIME)
	VPRINTF(mem_fmt, "EFI L1 page directory",
	    efirt_l1pt.pv_pa, efirt_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    efirt_l1pt.pv_va, efirt_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
#endif
	VPRINTF(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	VPRINTF(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	VPRINTF(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	VPRINTF(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	VPRINTF(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	if (map_vectors_p) {
		VPRINTF(mem_fmt, "Exception Vectors",
		    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
		    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
		    1);
	}
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		VPRINTF(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	VPRINTF("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read(),
	    armreg_contextidr_read());
#endif
	VPRINTF("\n");

	/* Switch tables */
	VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa);

	cpu_ttb = l1pt_pa;

	cpu_domains(DOMAIN_DEFAULT);

	cpu_idcache_wbinv_all();

#ifdef __HAVE_GENERIC_START

	/*
	 * Turn on caches and set SCTLR/ACTLR
	 */
	cpu_setup(boot_args);
#endif

	VPRINTF(" ttb");

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */
	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(l1pt_pa, KERNEL_PID);
	isb();
#else
	cpu_setttb(l1pt_pa, true);
#endif

	cpu_tlb_flushID();

#ifdef KASAN
	extern uint8_t start_stacks_bottom[];
	kasan_early_init((void *)start_stacks_bottom);
#endif

#ifdef ARM_MMU_EXTENDED
	VPRINTF("\nsctlr=%#x actlr=%#x\n",
	    armreg_sctlr_read(), armreg_auxctl_read());
#else
	VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read());
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_GENERIC_START
	/*
	 * Kick the secondaries to load the TTB.  After which they'll go
	 * back to sleep to wait for the final kick so they will hatch.
	 */
	VPRINTF(" hatchlings");
	cpu_boot_secondary_processors();
#endif
#endif

	VPRINTF(" OK\n");
}