/*	$NetBSD: arm32_kvminit.c,v 1.64 2020/07/10 12:25:09 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005 Genetec Corporation.  All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_arm_debug.h"
#include "opt_arm_start.h"
#include "opt_fdt.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.64 2020/07/10 12:25:09 skrll Exp $");

#include <sys/param.h>

#include <sys/asan.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/arm32/machdep.h>
#include <arm/bootconfig.h>
#include <arm/db_machdep.h>
#include <arm/locore.h>
#include <arm/undefined.h>

#if defined(FDT)
#include <arch/evbarm/fdt/platform.h>
#include <arm/fdt/arm_fdtvar.h>
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP
#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack
#endif
#endif

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

struct bootmem_info bootmem_info;

extern void *msgbufaddr;
paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */

#ifdef KASAN
vaddr_t kasan_kernelstart;
vaddr_t kasan_kernelsize;

#define KERNEL_L2PT_KASAN_NUM	howmany(VM_KERNEL_KASAN_SIZE, L2_S_SEGSIZE)
pv_addr_t kasan_l2pt[KERNEL_L2PT_KASAN_NUM];
#else
#define KERNEL_L2PT_KASAN_NUM	0
#endif

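/*
 * Offset used to translate between kernel virtual and physical addresses
 * (see KERN_VTOPHYS/KERN_PHYSTOV).  It is initialized very early by the
 * start code, which is presumably why it is kept in .data rather than .bss.
 */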
u_long kern_vtopdiff __attribute__((__section__(".data")));

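/*
 * arm32_bootmem_init() records the extent of physical memory and of the
 * kernel image in bootmem_info, and seeds the list of free physical
 * blocks: the memory after the kernel, plus any memory before it.
 */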
void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

	/*
	 * FDT/generic start fills in kern_vtopdiff early
	 */
#if defined(__HAVE_GENERIC_START)
	extern char KERNEL_BASE_virt[];
	extern char const __stop__init_memory[];

	VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff);

	vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt);
	vaddr_t kendva = round_page((vaddr_t)__stop__init_memory);

	kernelstart = KERN_VTOPHYS(kstartva);

	VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart);
#else
	vaddr_t kendva = round_page((vaddr_t)_end);

#if defined(KERNEL_BASE_VOFFSET)
	kern_vtopdiff = KERNEL_BASE_VOFFSET;
#else
	KASSERT(memstart == kernelstart);
	kern_vtopdiff = KERNEL_BASE + memstart;
#endif
#endif
	paddr_t kernelend = KERN_VTOPHYS(kendva);

	VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__,
	    memstart, memsize);
	VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__,
	    kernelstart, kernelend);

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
#ifndef ARM_HAS_LPAE
	if (physical_end == 0) {
		physical_end = -PAGE_SIZE;
		memsize -= PAGE_SIZE;
		bmi->bmi_end -= PAGE_SIZE;
		VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n",
		    __func__);
	}
#endif
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */

	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = kernelend;

#if defined(FDT)
	fdt_add_reserved_memory_range(bmi->bmi_kernelstart,
	    bmi->bmi_kernelend - bmi->bmi_kernelstart);
#endif

	VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart,
	    kernelend);

#if 0
	// XXX Makes RPI abort
	KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0);
#endif
	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
	VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
		pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
		VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}

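/*
 * Try to merge pv into acc_pv: this succeeds (and grows acc_pv) only if pv
 * is both physically and virtually contiguous with acc_pv and has the same
 * protection and cache attributes.
 */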
static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#if 0
		VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

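/*
 * Insert pv into the VA-sorted bmi_chunks list, either by merging it into
 * an existing chunk (and possibly merging that chunk with its successor)
 * or by taking a pv_addr_t from bmi_freechunks to hold the new entry.
 */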
static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
			VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
				VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;

	VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list)) {
		VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
	} else {
		VPRINTF("at tail\n");
	}
}

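/*
 * Carve npages pages out of the first free block that is large enough,
 * recording the PA/VA/protection/cache attributes in *pv.  The first time
 * a suitably aligned free block is seen, the kernel L1 page table is
 * allocated from it before anything else.
 */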
static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
    int prot, int cache, bool zero_p)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	KASSERT(npages > 0);

	/*
	 * If we haven't allocated the kernel L1 page table and we are aligned
	 * at an L1 table boundary, allocate the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		VPRINTF(" l1pt");

		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l1pt);
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our current first level translation table.
	 */
	KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);

#if defined(FDT)
	fdt_add_reserved_memory_range(free_pv->pv_pa, nbytes);
#endif
	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE,
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	if (zero_p)
		memset((void *)pv->pv_va, 0, nbytes);
}

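/*
 * arm32_kernel_vm_init() builds the initial kernel page tables: it sizes
 * and allocates the bootstrap L1/L2 tables, per-mode stacks and message
 * buffer, maps the kernel text/data, optionally all of RAM and the vector
 * page, installs the device mappings, and finally switches the MMU over
 * to the new tables.
 */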
void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
    const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max;
#else
	const size_t cpu_num = 1;
#endif

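	/*
	 * Decide whether the vector page has to be mapped here: CPUs with
	 * VBAR never need it, and on ARM11/ARMv7 it is (presumably) only
	 * needed when using the high vectors or when the Security
	 * Extensions are absent.
	 */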
#ifdef ARM_HAS_VBAR
	const bool map_vectors_p = false;
#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
	const bool map_vectors_p = vectors == ARM_VECTORS_HIGH
	    || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0;
#else
	const bool map_vectors_p = true;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	KASSERT(mapallmem_p);
#ifdef ARM_MMU_EXTENDED
	/*
	 * The direct map VA space ends at the start of the kernel VM space.
	 */
	pmap_directlimit = kernel_vm_base;
#else
	KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
#endif /* ARM_MMU_EXTENDED */
#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */

	/*
	 * Calculate the number of L2 pages needed for mapping the
	 * kernel + data + stuff.  Assume 2 L2 pages for kernel, 1 for vectors,
	 * and 1 for I/O.
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_KASAN_NUM;
	if (map_vectors_p) {
		kernel_size += PAGE_SIZE;	/* L2PT for VECTORS */
	}
	if (iovbase) {
		kernel_size += PAGE_SIZE;	/* L2PT for IO */
	}
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
	if (!mapallmem_p) {
		kernel_size += PAGE_SIZE
		    * howmany(kernel_size, L2_S_SEGSIZE);
	}
	kernel_size = round_page(kernel_size);

	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    howmany(kernel_size, L2_S_SEGSIZE);

	VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

	VPRINTF("%s: allocating page tables for", __func__);
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	if (map_vectors_p) {
		/*
		 * First allocate the L2 page for the vectors.
		 */
		VPRINTF(" vector");
		valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_vector_l2pt);
	}

	/*
	 * Now allocate L2 pages for the kernel.
	 */
	VPRINTF(" kernel");
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
	 */
	VPRINTF(" vm");
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

#ifdef KASAN
	/*
	 * Now allocate L2 pages for the KASAN shadow map VA space.
	 */
	VPRINTF(" kasan");
	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; ++idx) {
		valloc_pages(bmi, &kasan_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kasan_l2pt[idx]);
	}

#endif
	/*
	 * If someone wanted an L2 page for I/O, allocate it now.
	 */
	if (iovbase) {
		VPRINTF(" io");
		valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

	VPRINTF("%s: allocating stacks\n", __func__);

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,	/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,		/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer from the end of memory. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, false);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
	msgbufaddr = (void *)msgbuf.pv_va;

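	/*
	 * The KASAN shadow has to cover everything from KERNEL_BASE up to
	 * the end of the bootstrap allocations; the message buffer is the
	 * last of those, so it bounds the shadowed size.
	 */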
#ifdef KASAN
	kasan_kernelstart = KERNEL_BASE;
	kasan_kernelsize = (msgbuf.pv_va + round_page(MSGBUFSIZE)) - KERNEL_BASE;
#endif

	if (map_vectors_p) {
		/*
		 * Allocate a page for the system vector page.
		 * This page will just contain the system vectors and can be
		 * shared by all processes.
		 */
		VPRINTF(" vector");

		valloc_pages(bmi, &systempage, 1,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
		    PTE_CACHE, true);
	}
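	/*
	 * The VA of the vector page is recorded even when no page was
	 * allocated above (e.g. when VBAR is in use and the vectors are
	 * not mapped here).
	 */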
	systempage.pv_va = vectors;

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ | VM_PROT_WRITE, 0, true);
#endif

	/*
	 * OK, we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

	VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n",
	    kernel_l1pt.pv_va, kernel_l1pt.pv_pa);

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	if (map_vectors_p) {
		/* Map the L2 page tables in the L1 page table */
		const vaddr_t va = systempage.pv_va & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_vector_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_vector_l2pt.pv_va,
		    bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va, "(vectors)");
	}

	/*
	 * This enforces an alignment requirement of L2_S_SEGSIZE for the
	 * kernel start PA.
	 */
	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE);

	VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__,
	    kernel_base, KERNEL_L2PT_KERNEL_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		const vaddr_t va = kernel_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kernel_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
		    va, "(kernel)");
	}

	VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__,
	    kernel_vm_base, KERNEL_L2PT_VMDATA_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		const vaddr_t va = kernel_vm_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &vmdata_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    va, "(vm)");
	}
	if (iovbase) {
		const vaddr_t va = iovbase & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_io_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    va, "(io)");
	}

#ifdef KASAN
	VPRINTF("%s: kasan_shadow_base %x KERNEL_L2PT_KASAN_NUM %d\n", __func__,
	    VM_KERNEL_KASAN_BASE, KERNEL_L2PT_KASAN_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; idx++) {
		const vaddr_t va = VM_KERNEL_KASAN_BASE + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kasan_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kasan_l2pt[idx].pv_va, kasan_l2pt[idx].pv_pa,
		    va, "(kasan)");
	}
#endif

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);

	// This could be done earlier and then the kernel data and pages
	// allocated above would get merged (concatenated).

	VPRINTF("Mapping kernel\n");

	extern char etext[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE;
	text.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);

	add_pages(bmi, &data);

	VPRINTF("Listing Chunks\n");

	pv_addr_t *lpv;
	SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
		VPRINTF("%s: pv %p: chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
		    lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
	}
	VPRINTF("\nMapping Chunks\n");

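	/*
	 * Walk the sorted chunk list and enter the mappings.  Adjacent chunks
	 * with identical attributes are coalesced first; when all of memory
	 * is being mapped, any holes between chunks are filled with normal
	 * cacheable read/write mappings before moving on.
	 */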
	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va);
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = KERNEL_BASE;
		cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va);
		cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to encompass the
				 * hole, and if so do it and retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
					continue;
				}

				/*
				 * We couldn't, so emit the current chunk and
				 * make the hole the new current chunk.
				 */
				VPRINTF("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
				pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
				    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * Set the current chunk to the hole and try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate, so emit the current one
		 * and use the new pv as the current pv.
		 */
		VPRINTF("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}

	/*
	 * If we are mapping all of memory, let's map the rest of memory.
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
			KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base,
			    "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size,
			    kernel_vm_base);
			VPRINTF("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	/*
	 * The amount we can direct map is limited by the start of the
	 * virtual part of the kernel address space.  Don't overrun
	 * into it.
	 */
	if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) {
		cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va;
	}

	/*
	 * Now we map the final chunk.
	 */
	VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel.
	 */
	if (map_vectors_p) {
		/* Map the vector page. */
		pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, PTE_CACHE);
	}

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at the same address in the first-level
	 * page table so that we can continue to use the console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

	/* Tell the user about where all the bits and pieces live. */
	VPRINTF("%22s Physical Virtual Num\n", " ");
	VPRINTF("%22s Starting Ending Starting Ending Pages\n", " ");

#ifdef VERBOSE_INIT_ARM
	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx %zu\n";
#endif

#if 0
	// XXX Doesn't make sense if kernel not at bottom of RAM
	VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1),
	    (int)physmem);
#endif
	VPRINTF(mem_fmt, "text section",
	    text.pv_pa, text.pv_pa + text.pv_size - 1,
	    text.pv_va, text.pv_va + text.pv_size - 1,
	    (int)(text.pv_size / PAGE_SIZE));
	VPRINTF(mem_fmt, "data section",
	    KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
		- trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "bss section",
	    KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
		- trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	VPRINTF(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	VPRINTF(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	VPRINTF(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	VPRINTF(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	VPRINTF(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	if (map_vectors_p) {
		VPRINTF(mem_fmt, "Exception Vectors",
		    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
		    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
		    1);
	}
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		VPRINTF(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	VPRINTF("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read(),
	    armreg_contextidr_read());
#endif
	VPRINTF("\n");

	/* Switch tables */
	VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa);

	cpu_ttb = l1pt_pa;

	cpu_domains(DOMAIN_DEFAULT);

	cpu_idcache_wbinv_all();

#ifdef __HAVE_GENERIC_START

	/*
	 * Turn on caches and set SCTLR/ACTLR
	 */
	cpu_setup(boot_args);
#endif

	VPRINTF(" ttb");

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */
	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(l1pt_pa, KERNEL_PID);
	arm_isb();
#else
	cpu_setttb(l1pt_pa, true);
#endif

	cpu_tlb_flushID();

#ifdef KASAN
	extern uint8_t start_stacks_bottom[];
	kasan_early_init((void *)start_stacks_bottom);
#endif

#ifdef ARM_MMU_EXTENDED
	VPRINTF("\nsctlr=%#x actlr=%#x\n",
	    armreg_sctlr_read(), armreg_auxctl_read());
#else
	VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read());
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_GENERIC_START
	/*
	 * Kick the secondaries to load the TTB.  After that they'll go
	 * back to sleep to wait for the final kick that lets them hatch.
	 */
	VPRINTF(" hatchlings");
	cpu_boot_secondary_processors();
#endif
#endif

	VPRINTF(" OK\n");
}