arm32_kvminit.c revision 1.52
1 1.52 skrll /*	$NetBSD: arm32_kvminit.c,v 1.52 2019/02/06 13:28:08 skrll Exp $	*/
2 1.1 matt
3 1.1 matt /*
4 1.1 matt * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
5 1.1 matt * Written by Hiroyuki Bessho for Genetec Corporation.
6 1.1 matt *
7 1.1 matt * Redistribution and use in source and binary forms, with or without
8 1.1 matt * modification, are permitted provided that the following conditions
9 1.1 matt * are met:
10 1.1 matt * 1. Redistributions of source code must retain the above copyright
11 1.1 matt * notice, this list of conditions and the following disclaimer.
12 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 matt * notice, this list of conditions and the following disclaimer in the
14 1.1 matt * documentation and/or other materials provided with the distribution.
15 1.1 matt * 3. The name of Genetec Corporation may not be used to endorse or
16 1.1 matt * promote products derived from this software without specific prior
17 1.1 matt * written permission.
18 1.1 matt *
19 1.1 matt * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
20 1.1 matt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
23 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
30 1.1 matt *
31 1.1 matt * Copyright (c) 2001 Wasabi Systems, Inc.
32 1.1 matt * All rights reserved.
33 1.1 matt *
34 1.1 matt * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 1.1 matt *
36 1.1 matt * Redistribution and use in source and binary forms, with or without
37 1.1 matt * modification, are permitted provided that the following conditions
38 1.1 matt * are met:
39 1.1 matt * 1. Redistributions of source code must retain the above copyright
40 1.1 matt * notice, this list of conditions and the following disclaimer.
41 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
42 1.1 matt * notice, this list of conditions and the following disclaimer in the
43 1.1 matt * documentation and/or other materials provided with the distribution.
44 1.1 matt * 3. All advertising materials mentioning features or use of this software
45 1.1 matt * must display the following acknowledgement:
46 1.1 matt * This product includes software developed for the NetBSD Project by
47 1.1 matt * Wasabi Systems, Inc.
48 1.1 matt * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 1.1 matt * or promote products derived from this software without specific prior
50 1.1 matt * written permission.
51 1.1 matt *
52 1.1 matt * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 1.1 matt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
56 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
63 1.1 matt *
64 1.1 matt * Copyright (c) 1997,1998 Mark Brinicombe.
65 1.1 matt * Copyright (c) 1997,1998 Causality Limited.
66 1.1 matt * All rights reserved.
67 1.1 matt *
68 1.1 matt * Redistribution and use in source and binary forms, with or without
69 1.1 matt * modification, are permitted provided that the following conditions
70 1.1 matt * are met:
71 1.1 matt * 1. Redistributions of source code must retain the above copyright
72 1.1 matt * notice, this list of conditions and the following disclaimer.
73 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
74 1.1 matt * notice, this list of conditions and the following disclaimer in the
75 1.1 matt * documentation and/or other materials provided with the distribution.
76 1.1 matt * 3. All advertising materials mentioning features or use of this software
77 1.1 matt * must display the following acknowledgement:
78 1.1 matt * This product includes software developed by Mark Brinicombe
79 1.1 matt * for the NetBSD Project.
80 1.1 matt * 4. The name of the company nor the name of the author may be used to
81 1.1 matt * endorse or promote products derived from this software without specific
82 1.1 matt * prior written permission.
83 1.1 matt *
84 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
85 1.1 matt * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
86 1.1 matt * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
87 1.1 matt * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
88 1.1 matt * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
89 1.1 matt * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
90 1.1 matt * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
91 1.1 matt * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
92 1.1 matt * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
93 1.1 matt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
94 1.1 matt * SUCH DAMAGE.
95 1.1 matt *
96 1.1 matt * Copyright (c) 2007 Microsoft
97 1.1 matt * All rights reserved.
98 1.1 matt *
99 1.1 matt * Redistribution and use in source and binary forms, with or without
100 1.1 matt * modification, are permitted provided that the following conditions
101 1.1 matt * are met:
102 1.1 matt * 1. Redistributions of source code must retain the above copyright
103 1.1 matt * notice, this list of conditions and the following disclaimer.
104 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
105 1.1 matt * notice, this list of conditions and the following disclaimer in the
106 1.1 matt * documentation and/or other materials provided with the distribution.
107 1.1 matt * 3. All advertising materials mentioning features or use of this software
108 1.1 matt * must display the following acknowledgement:
109 1.1 matt * This product includes software developed by Microsoft
110 1.1 matt *
111 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
112 1.1 matt * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
113 1.1 matt * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
114 1.1 matt  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
115 1.1 matt * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
116 1.1 matt * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
117 1.1 matt * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
118 1.1 matt * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
119 1.1 matt * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
120 1.1 matt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
121 1.1 matt * SUCH DAMAGE.
122 1.1 matt */
123 1.1 matt
124 1.42 skrll #include "opt_arm_debug.h"
125 1.46 skrll #include "opt_arm_start.h"
126 1.41 skrll #include "opt_fdt.h"
127 1.32 skrll #include "opt_multiprocessor.h"
128 1.32 skrll
129 1.1 matt #include <sys/cdefs.h>
130 1.52 skrll __KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.52 2019/02/06 13:28:08 skrll Exp $");
131 1.1 matt
132 1.1 matt #include <sys/param.h>
133 1.1 matt #include <sys/device.h>
134 1.1 matt #include <sys/kernel.h>
135 1.1 matt #include <sys/reboot.h>
136 1.1 matt #include <sys/bus.h>
137 1.1 matt
138 1.1 matt #include <dev/cons.h>
139 1.1 matt
140 1.1 matt #include <uvm/uvm_extern.h>
141 1.1 matt
142 1.24 matt #include <arm/locore.h>
143 1.1 matt #include <arm/db_machdep.h>
144 1.1 matt #include <arm/undefined.h>
145 1.1 matt #include <arm/bootconfig.h>
146 1.1 matt #include <arm/arm32/machdep.h>
147 1.1 matt
148 1.41 skrll #if defined(FDT)
149 1.41 skrll #include <arch/evbarm/fdt/platform.h>
150 1.46 skrll #include <arm/fdt/arm_fdtvar.h>
151 1.41 skrll #endif
152 1.41 skrll
153 1.39 skrll #ifdef MULTIPROCESSOR
154 1.39 skrll #ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP
155 1.39 skrll #error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack
156 1.39 skrll #endif
157 1.39 skrll #endif
158 1.39 skrll
159 1.42 skrll #ifdef VERBOSE_INIT_ARM
160 1.42 skrll #define VPRINTF(...) printf(__VA_ARGS__)
161 1.42 skrll #else
162 1.45 skrll #define VPRINTF(...) __nothing
163 1.42 skrll #endif
164 1.42 skrll
165 1.1 matt struct bootmem_info bootmem_info;
166 1.1 matt
167 1.27 matt extern void *msgbufaddr;
168 1.1 matt paddr_t msgbufphys;
169 1.1 matt paddr_t physical_start;
170 1.1 matt paddr_t physical_end;
171 1.1 matt
172 1.1 matt extern char etext[];
173 1.1 matt extern char __data_start[], _edata[];
174 1.1 matt extern char __bss_start[], __bss_end__[];
175 1.1 matt extern char _end[];
176 1.1 matt
177 1.1 matt /* Page tables for mapping kernel VM */
178 1.1 matt #define KERNEL_L2PT_VMDATA_NUM 8 /* start with 32MB of KVM */
179 1.1 matt
180 1.44 skrll u_long kern_vtopdiff __attribute__((__section__(".data")));
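/*
 * kern_vtopdiff is the constant offset that the KERN_VTOPHYS()/
 * KERN_PHYSTOV() macros apply when converting between kernel virtual
 * and physical addresses.  It is placed in .data explicitly, presumably
 * so that a value stored by very early start code (see the FDT/generic
 * start note below) is not lost when the BSS is later zeroed, which is
 * where an uninitialized variable would otherwise end up.
 */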
181 1.1 matt
182 1.1 matt void
183 1.1 matt arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
184 1.1 matt {
185 1.1 matt struct bootmem_info * const bmi = &bootmem_info;
186 1.1 matt pv_addr_t *pv = bmi->bmi_freeblocks;
187 1.1 matt
188 1.44 skrll /*
189 1.46 skrll * FDT/generic start fills in kern_vtopdiff early
190 1.44 skrll */
191 1.46 skrll #if defined(__HAVE_GENERIC_START)
192 1.46 skrll extern char KERNEL_BASE_virt[];
193 1.52 skrll extern char const __stop__init_memory[];
194 1.46 skrll
195 1.46 skrll VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff);
196 1.46 skrll
197 1.46 skrll vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt);
198 1.52 skrll vaddr_t kendva = round_page((vaddr_t)__stop__init_memory);
199 1.46 skrll
200 1.46 skrll kernelstart = KERN_VTOPHYS(kstartva);
201 1.46 skrll
202 1.46 skrll VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart);
203 1.46 skrll #else
204 1.46 skrll vaddr_t kendva = round_page((vaddr_t)_end);
205 1.46 skrll
206 1.44 skrll #if defined(KERNEL_BASE_VOFFSET)
207 1.44 skrll kern_vtopdiff = KERNEL_BASE_VOFFSET;
208 1.44 skrll #else
209 1.44 skrll KASSERT(memstart == kernelstart);
210 1.44 skrll kern_vtopdiff = KERNEL_BASE + memstart;
211 1.44 skrll #endif
212 1.46 skrll #endif
213 1.46 skrll paddr_t kernelend = KERN_VTOPHYS(kendva);
214 1.44 skrll
215 1.51 skrll VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__,
216 1.51 skrll memstart, memsize);
217 1.46 skrll VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__,
218 1.46 skrll kernelstart, kernelend);
219 1.1 matt
220 1.1 matt physical_start = bmi->bmi_start = memstart;
221 1.1 matt physical_end = bmi->bmi_end = memstart + memsize;
222 1.33 matt #ifndef ARM_HAS_LPAE
223 1.33 matt if (physical_end == 0) {
224 1.33 matt physical_end = -PAGE_SIZE;
225 1.33 matt memsize -= PAGE_SIZE;
226 1.34 matt bmi->bmi_end -= PAGE_SIZE;
227 1.42 skrll VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n",
228 1.33 matt __func__);
229 1.33 matt }
230 1.33 matt #endif
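	/*
	 * Rationale for the trim above: without LPAE, physical addresses
	 * are 32 bits wide, so RAM that runs right up to the 4GB boundary
	 * would make memstart + memsize wrap to 0 and every comparison
	 * against physical_end/bmi_end below would misfire.  Dropping a
	 * single page keeps the end address representable.
	 */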
231 1.1 matt physmem = memsize / PAGE_SIZE;
232 1.1 matt
233 1.1 matt /*
234 1.1 matt * Let's record where the kernel lives.
235 1.1 matt */
236 1.46 skrll
237 1.1 matt bmi->bmi_kernelstart = kernelstart;
238 1.46 skrll bmi->bmi_kernelend = kernelend;
239 1.1 matt
240 1.41 skrll #if defined(FDT)
241 1.41 skrll fdt_add_reserved_memory_range(bmi->bmi_kernelstart,
242 1.41 skrll bmi->bmi_kernelend - bmi->bmi_kernelstart);
243 1.41 skrll #endif
244 1.41 skrll
245 1.46 skrll VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart,
246 1.46 skrll kernelend);
247 1.1 matt
248 1.46 skrll #if 0
249 1.46 skrll // XXX Makes RPI abort
250 1.46 skrll KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0);
251 1.46 skrll #endif
252 1.1 matt /*
253 1.1 matt * Now the rest of the free memory must be after the kernel.
254 1.1 matt */
255 1.1 matt pv->pv_pa = bmi->bmi_kernelend;
256 1.44 skrll pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
257 1.1 matt pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
258 1.1 matt bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
259 1.42 skrll VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
260 1.1 matt __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
261 1.1 matt pv->pv_pa + pv->pv_size - 1, pv->pv_va);
262 1.1 matt pv++;
263 1.1 matt
264 1.1 matt /*
265 1.1 matt * Add a free block for any memory before the kernel.
266 1.1 matt */
267 1.1 matt if (bmi->bmi_start < bmi->bmi_kernelstart) {
268 1.1 matt pv->pv_pa = bmi->bmi_start;
269 1.44 skrll pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
270 1.33 matt pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
271 1.1 matt bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
272 1.42 skrll VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
273 1.1 matt __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
274 1.1 matt pv->pv_pa + pv->pv_size - 1, pv->pv_va);
275 1.1 matt pv++;
276 1.1 matt }
277 1.1 matt
278 1.1 matt bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;
279 1.36 skrll
280 1.1 matt SLIST_INIT(&bmi->bmi_freechunks);
281 1.1 matt SLIST_INIT(&bmi->bmi_chunks);
282 1.1 matt }
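/*
 * A sketch of the resulting free-block layout, with hypothetical
 * numbers (memstart 0x10000000, memsize 0x20000000, kernel occupying
 * 0x10008000..0x10400000):
 *
 *	bmi_freeblocks[0] = { pa 0x10400000, size 0x1fc00000 }	after kernel
 *	bmi_freeblocks[1] = { pa 0x10000000, size 0x00008000 }	before kernel
 *
 * The block following the kernel is always entered first; a block in
 * front of the kernel is only added when the kernel is not loaded at
 * the very start of RAM.
 */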
283 1.1 matt
284 1.1 matt static bool
285 1.1 matt concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
286 1.1 matt {
287 1.1 matt if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
288 1.1 matt && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
289 1.1 matt && acc_pv->pv_prot == pv->pv_prot
290 1.1 matt && acc_pv->pv_cache == pv->pv_cache) {
291 1.46 skrll #if 0
292 1.42 skrll VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
293 1.46 skrll __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size,
294 1.46 skrll acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size);
295 1.46 skrll #endif
296 1.1 matt acc_pv->pv_size += pv->pv_size;
297 1.1 matt return true;
298 1.1 matt }
299 1.1 matt
300 1.1 matt return false;
301 1.1 matt }
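/*
 * concat_pvaddr() only merges chunks that are contiguous in both PA and
 * VA and that share protection and cache attributes; e.g. (hypothetical
 * values) { pa 0x1000, va 0xc0001000, size 0x1000 } followed by
 * { pa 0x2000, va 0xc0002000, same prot/cache } collapses into a single
 * 0x2000-byte chunk.
 */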
302 1.1 matt
303 1.1 matt static void
304 1.1 matt add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
305 1.1 matt {
306 1.1 matt pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
307 1.14 skrll while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
308 1.1 matt pv_addr_t * const pv0 = (*pvp);
309 1.1 matt KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
310 1.1 matt if (concat_pvaddr(pv0, pv)) {
311 1.42 skrll VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
312 1.1 matt __func__, "appending", pv,
313 1.1 matt pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
314 1.1 matt pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
315 1.1 matt pv = SLIST_NEXT(pv0, pv_list);
316 1.1 matt if (pv != NULL && concat_pvaddr(pv0, pv)) {
317 1.42 skrll VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
318 1.1 matt __func__, "merging", pv,
319 1.1 matt pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
320 1.1 matt pv0->pv_pa,
321 1.1 matt pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
322 1.1 matt SLIST_REMOVE_AFTER(pv0, pv_list);
323 1.1 matt SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
324 1.1 matt }
325 1.1 matt return;
326 1.1 matt }
327 1.1 matt KASSERT(pv->pv_va != (*pvp)->pv_va);
328 1.1 matt pvp = &SLIST_NEXT(*pvp, pv_list);
329 1.1 matt }
330 1.1 matt KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
331 1.1 matt pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
332 1.1 matt KASSERT(new_pv != NULL);
333 1.1 matt SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
334 1.1 matt *new_pv = *pv;
335 1.1 matt SLIST_NEXT(new_pv, pv_list) = *pvp;
336 1.1 matt (*pvp) = new_pv;
337 1.42 skrll
338 1.42 skrll VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
339 1.1 matt __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
340 1.1 matt new_pv->pv_size / PAGE_SIZE);
341 1.42 skrll if (SLIST_NEXT(new_pv, pv_list)) {
342 1.42 skrll VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
343 1.42 skrll } else {
344 1.42 skrll VPRINTF("at tail\n");
345 1.42 skrll }
346 1.1 matt }
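/*
 * add_pages() keeps bmi_chunks sorted by virtual address and coalesces
 * neighbouring entries via concat_pvaddr(), so the mapping loop in
 * arm32_kernel_vm_init() can walk the list front to back and emit one
 * pmap_map_chunk() call per maximal contiguous run.
 */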
347 1.1 matt
348 1.1 matt static void
349 1.1 matt valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
350 1.17 matt int prot, int cache, bool zero_p)
351 1.1 matt {
352 1.1 matt size_t nbytes = npages * PAGE_SIZE;
353 1.1 matt pv_addr_t *free_pv = bmi->bmi_freeblocks;
354 1.1 matt size_t free_idx = 0;
355 1.1 matt static bool l1pt_found;
356 1.1 matt
357 1.23 matt KASSERT(npages > 0);
358 1.23 matt
359 1.1 matt /*
360 1.6 skrll * If we haven't allocated the kernel L1 page table and we are aligned
361 1.1 matt 	 * at an L1 table boundary, allocate the memory for it.
362 1.1 matt */
363 1.1 matt if (!l1pt_found
364 1.1 matt && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
365 1.1 matt && free_pv->pv_size >= L1_TABLE_SIZE) {
366 1.1 matt l1pt_found = true;
367 1.46 skrll VPRINTF(" l1pt");
368 1.46 skrll
369 1.1 matt valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
370 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
371 1.1 matt add_pages(bmi, &kernel_l1pt);
372 1.1 matt }
373 1.1 matt
374 1.1 matt while (nbytes > free_pv->pv_size) {
375 1.1 matt free_pv++;
376 1.1 matt free_idx++;
377 1.1 matt if (free_idx == bmi->bmi_nfreeblocks) {
378 1.1 matt panic("%s: could not allocate %zu bytes",
379 1.1 matt __func__, nbytes);
380 1.1 matt }
381 1.1 matt }
382 1.1 matt
383 1.12 skrll /*
384 1.12 skrll * As we allocate the memory, make sure that we don't walk over
385 1.12 skrll * our current first level translation table.
386 1.12 skrll */
387 1.12 skrll KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);
388 1.12 skrll
389 1.41 skrll #if defined(FDT)
390 1.41 skrll fdt_add_reserved_memory_range(free_pv->pv_pa, nbytes);
391 1.41 skrll #endif
392 1.1 matt pv->pv_pa = free_pv->pv_pa;
393 1.1 matt pv->pv_va = free_pv->pv_va;
394 1.1 matt pv->pv_size = nbytes;
395 1.1 matt pv->pv_prot = prot;
396 1.1 matt pv->pv_cache = cache;
397 1.1 matt
398 1.1 matt /*
399 1.1 matt * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE
400 1.1 matt * just use PTE_CACHE.
401 1.1 matt */
402 1.1 matt if (cache == PTE_PAGETABLE
403 1.1 matt && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
404 1.1 matt && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
405 1.1 matt && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
406 1.1 matt pv->pv_cache = PTE_CACHE;
407 1.1 matt
408 1.1 matt free_pv->pv_pa += nbytes;
409 1.1 matt free_pv->pv_va += nbytes;
410 1.1 matt free_pv->pv_size -= nbytes;
411 1.1 matt if (free_pv->pv_size == 0) {
412 1.1 matt --bmi->bmi_nfreeblocks;
413 1.1 matt for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
414 1.1 matt free_pv[0] = free_pv[1];
415 1.1 matt }
416 1.1 matt }
417 1.1 matt
418 1.1 matt bmi->bmi_freepages -= npages;
419 1.1 matt
420 1.18 matt if (zero_p)
421 1.18 matt memset((void *)pv->pv_va, 0, nbytes);
422 1.1 matt }
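/*
 * valloc_pages() carves npages pages off the front of the first free
 * block that is large enough.  A minimal usage sketch (hypothetical
 * caller):
 *
 *	pv_addr_t scratch;
 *	valloc_pages(bmi, &scratch, 1,
 *	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
 *
 * leaves scratch.pv_pa/pv_va naming one zeroed, cacheable page and
 * shrinks (or retires) the free block it came from.
 */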
423 1.1 matt
424 1.1 matt void
425 1.1 matt arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
426 1.1 matt const struct pmap_devmap *devmap, bool mapallmem_p)
427 1.1 matt {
428 1.1 matt struct bootmem_info * const bmi = &bootmem_info;
429 1.1 matt #ifdef MULTIPROCESSOR
430 1.25 matt const size_t cpu_num = arm_cpu_max;
431 1.1 matt #else
432 1.1 matt const size_t cpu_num = 1;
433 1.1 matt #endif
434 1.46 skrll
435 1.20 matt #ifdef ARM_HAS_VBAR
436 1.20 matt const bool map_vectors_p = false;
437 1.20 matt #elif defined(CPU_ARMV7) || defined(CPU_ARM11)
438 1.21 matt const bool map_vectors_p = vectors == ARM_VECTORS_HIGH
439 1.21 matt || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0;
440 1.19 matt #else
441 1.19 matt const bool map_vectors_p = true;
442 1.19 matt #endif
443 1.1 matt
444 1.15 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
445 1.15 matt KASSERT(mapallmem_p);
446 1.28 matt #ifdef ARM_MMU_EXTENDED
447 1.28 matt /*
448 1.35 matt * The direct map VA space ends at the start of the kernel VM space.
449 1.28 matt */
450 1.34 matt pmap_directlimit = kernel_vm_base;
451 1.28 matt #else
452 1.28 matt KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
453 1.28 matt #endif /* ARM_MMU_EXTENDED */
454 1.28 matt #endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */
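	/*
	 * In other words: without ARM_MMU_EXTENDED all of RAM must fit
	 * between KERNEL_BASE and kernel_vm_base if it is to be direct
	 * mapped in its entirety, while with ARM_MMU_EXTENDED the direct
	 * map is simply limited to kernel_vm_base via pmap_directlimit.
	 * The mapping loop below additionally clamps its final chunk at
	 * kernel_vm_base so the direct map never overruns kernel VM.
	 */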
455 1.15 matt
456 1.1 matt /*
457 1.1 matt * Calculate the number of L2 pages needed for mapping the
458 1.11 skrll * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors,
459 1.11 skrll * and 1 for IO
460 1.1 matt */
461 1.1 matt size_t kernel_size = bmi->bmi_kernelend;
462 1.1 matt kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
463 1.23 matt kernel_size += L1_TABLE_SIZE_REAL;
464 1.23 matt kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
465 1.23 matt if (map_vectors_p) {
466 1.23 matt kernel_size += PAGE_SIZE; /* L2PT for VECTORS */
467 1.23 matt }
468 1.23 matt if (iovbase) {
469 1.23 matt kernel_size += PAGE_SIZE; /* L2PT for IO */
470 1.23 matt }
471 1.1 matt kernel_size +=
472 1.1 matt cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
473 1.1 matt + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
474 1.11 skrll kernel_size += round_page(MSGBUFSIZE);
475 1.1 matt kernel_size += 0x10000; /* slop */
476 1.23 matt if (!mapallmem_p) {
477 1.23 matt kernel_size += PAGE_SIZE
478 1.23 matt * ((kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE);
479 1.23 matt }
480 1.1 matt kernel_size = round_page(kernel_size);
481 1.1 matt
482 1.1 matt /*
483 1.37 skrll * Now we know how many L2 pages it will take.
484 1.1 matt */
485 1.37 skrll const size_t KERNEL_L2PT_KERNEL_NUM =
486 1.37 skrll round_page(kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;
487 1.1 matt
488 1.42 skrll VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
489 1.1 matt __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
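	/*
	 * Each L2 page table page maps L2_S_SEGSIZE of VA (4MB; compare
	 * the KERNEL_L2PT_VMDATA_NUM comment above, where 8 pages give
	 * 32MB of KVM), so for example a kernel_size estimate of ~6MB
	 * yields KERNEL_L2PT_KERNEL_NUM == 2 here.
	 */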
490 1.1 matt
491 1.1 matt KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts));
492 1.1 matt pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
493 1.1 matt pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
494 1.1 matt pv_addr_t msgbuf;
495 1.1 matt pv_addr_t text;
496 1.1 matt pv_addr_t data;
497 1.1 matt pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM+KERNEL_L2PT_VMDATA_NUM+11];
498 1.1 matt #if ARM_MMU_XSCALE == 1
499 1.1 matt pv_addr_t minidataclean;
500 1.1 matt #endif
501 1.1 matt
502 1.1 matt /*
503 1.1 matt * We need to allocate some fixed page tables to get the kernel going.
504 1.1 matt *
505 1.1 matt * We are going to allocate our bootstrap pages from the beginning of
506 1.1 matt * the free space that we just calculated. We allocate one page
507 1.1 matt * directory and a number of page tables and store the physical
508 1.10 skrll * addresses in the bmi_l2pts array in bootmem_info.
509 1.1 matt *
510 1.1 matt * The kernel page directory must be on a 16K boundary. The page
511 1.1 matt * tables must be on 4K boundaries. What we do is allocate the
512 1.1 matt * page directory on the first 16K boundary that we encounter, and
513 1.1 matt * the page tables on 4K boundaries otherwise. Since we allocate
514 1.1 matt * at least 3 L2 page tables, we are guaranteed to encounter at
515 1.1 matt * least one 16K aligned region.
516 1.1 matt */
517 1.1 matt
518 1.42 skrll VPRINTF("%s: allocating page tables for", __func__);
519 1.1 matt for (size_t i = 0; i < __arraycount(chunks); i++) {
520 1.1 matt SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
521 1.1 matt }
522 1.1 matt
523 1.1 matt kernel_l1pt.pv_pa = 0;
524 1.1 matt kernel_l1pt.pv_va = 0;
525 1.1 matt
526 1.1 matt /*
527 1.10 skrll * Allocate the L2 pages, but if we get to a page that is aligned for
528 1.10 skrll * an L1 page table, we will allocate the pages for it first and then
529 1.10 skrll * allocate the L2 page.
530 1.10 skrll */
531 1.10 skrll
532 1.19 matt if (map_vectors_p) {
533 1.19 matt /*
534 1.19 matt * First allocate L2 page for the vectors.
535 1.19 matt */
536 1.42 skrll VPRINTF(" vector");
537 1.23 matt valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
538 1.23 matt VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
539 1.19 matt add_pages(bmi, &bmi->bmi_vector_l2pt);
540 1.19 matt }
541 1.1 matt
542 1.1 matt /*
543 1.10 skrll * Now allocate L2 pages for the kernel
544 1.1 matt */
545 1.42 skrll VPRINTF(" kernel");
546 1.8 skrll for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
547 1.23 matt valloc_pages(bmi, &kernel_l2pt[idx], 1,
548 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
549 1.1 matt add_pages(bmi, &kernel_l2pt[idx]);
550 1.1 matt }
551 1.10 skrll
552 1.10 skrll /*
553 1.10 skrll * Now allocate L2 pages for the initial kernel VA space.
554 1.10 skrll */
555 1.42 skrll VPRINTF(" vm");
556 1.8 skrll for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
557 1.23 matt valloc_pages(bmi, &vmdata_l2pt[idx], 1,
558 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
559 1.1 matt add_pages(bmi, &vmdata_l2pt[idx]);
560 1.1 matt }
561 1.1 matt
562 1.1 matt /*
563 1.1 matt * If someone wanted a L2 page for I/O, allocate it now.
564 1.1 matt */
565 1.23 matt if (iovbase) {
566 1.42 skrll VPRINTF(" io");
567 1.23 matt valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
568 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
569 1.1 matt add_pages(bmi, &bmi->bmi_io_l2pt);
570 1.1 matt }
571 1.1 matt
572 1.42 skrll VPRINTF("%s: allocating stacks\n", __func__);
573 1.1 matt
574 1.10 skrll /* Allocate stacks for all modes and CPUs */
575 1.1 matt valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
576 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
577 1.1 matt add_pages(bmi, &abtstack);
578 1.1 matt valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
579 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
580 1.1 matt add_pages(bmi, &fiqstack);
581 1.1 matt valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
582 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
583 1.1 matt add_pages(bmi, &irqstack);
584 1.1 matt valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
585 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
586 1.1 matt add_pages(bmi, &undstack);
587 1.1 matt valloc_pages(bmi, &idlestack, UPAGES * cpu_num, /* SVC32 */
588 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
589 1.1 matt add_pages(bmi, &idlestack);
590 1.1 matt valloc_pages(bmi, &kernelstack, UPAGES, /* SVC32 */
591 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
592 1.1 matt add_pages(bmi, &kernelstack);
593 1.1 matt
594 1.1 matt /* Allocate the message buffer from the end of memory. */
595 1.1 matt const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
596 1.1 matt valloc_pages(bmi, &msgbuf, msgbuf_pgs,
597 1.17 matt VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false);
598 1.1 matt add_pages(bmi, &msgbuf);
599 1.1 matt msgbufphys = msgbuf.pv_pa;
600 1.27 matt msgbufaddr = (void *)msgbuf.pv_va;
601 1.1 matt
602 1.19 matt if (map_vectors_p) {
603 1.19 matt /*
604 1.19 matt * Allocate a page for the system vector page.
605 1.19 matt * This page will just contain the system vectors and can be
606 1.19 matt * shared by all processes.
607 1.19 matt */
608 1.46 skrll VPRINTF(" vector");
609 1.46 skrll
610 1.37 skrll valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
611 1.19 matt PTE_CACHE, true);
612 1.19 matt }
613 1.1 matt systempage.pv_va = vectors;
614 1.1 matt
615 1.1 matt /*
616 1.1 matt * If the caller needed a few extra pages for some reason, allocate
617 1.1 matt * them now.
618 1.1 matt */
619 1.1 matt #if ARM_MMU_XSCALE == 1
620 1.1 matt #if (ARM_NMMUS > 1)
621 1.1 matt if (xscale_use_minidata)
622 1.36 skrll #endif
623 1.30 kiyohara valloc_pages(bmi, &minidataclean, 1,
624 1.18 matt VM_PROT_READ|VM_PROT_WRITE, 0, true);
625 1.1 matt #endif
626 1.1 matt
627 1.1 matt /*
628 1.1 matt * Ok we have allocated physical pages for the primary kernel
629 1.1 matt * page tables and stacks. Let's just confirm that.
630 1.1 matt */
631 1.1 matt if (kernel_l1pt.pv_va == 0
632 1.1 matt && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
633 1.1 matt panic("%s: Failed to allocate or align the kernel "
634 1.1 matt "page directory", __func__);
635 1.1 matt
636 1.46 skrll VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n",
637 1.46 skrll kernel_l1pt.pv_va, kernel_l1pt.pv_pa);
638 1.1 matt
639 1.1 matt /*
640 1.1 matt * Now we start construction of the L1 page table
641 1.1 matt * We start by mapping the L2 page tables into the L1.
642 1.1 matt * This means that we can replace L1 mappings later on if necessary
643 1.1 matt */
644 1.1 matt vaddr_t l1pt_va = kernel_l1pt.pv_va;
645 1.1 matt paddr_t l1pt_pa = kernel_l1pt.pv_pa;
646 1.1 matt
647 1.19 matt if (map_vectors_p) {
648 1.19 matt /* Map the L2 pages tables in the L1 page table */
649 1.19 matt pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
650 1.19 matt &bmi->bmi_vector_l2pt);
651 1.42 skrll VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) "
652 1.19 matt 		    "for VA %#lx (vectors)\n",
653 1.19 matt __func__, bmi->bmi_vector_l2pt.pv_va,
654 1.19 matt bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va);
655 1.19 matt }
656 1.1 matt
657 1.46 skrll /*
658 1.46 skrll 	 * This enforces an alignment requirement of L2_S_SEGSIZE on the
659 1.46 skrll 	 * kernel start PA.
660 1.46 skrll */
661 1.1 matt const vaddr_t kernel_base =
662 1.44 skrll KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE);
663 1.46 skrll
664 1.46 skrll VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__,
665 1.46 skrll kernel_base, KERNEL_L2PT_KERNEL_NUM);
666 1.46 skrll
667 1.1 matt for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
668 1.1 matt pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
669 1.1 matt &kernel_l2pt[idx]);
670 1.42 skrll VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n",
671 1.23 matt __func__, kernel_l2pt[idx].pv_va,
672 1.23 matt kernel_l2pt[idx].pv_pa, kernel_base + idx * L2_S_SEGSIZE);
673 1.1 matt }
674 1.1 matt
675 1.49 skrll VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__,
676 1.49 skrll kernel_vm_base, KERNEL_L2PT_VMDATA_NUM);
677 1.46 skrll
678 1.1 matt for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
679 1.1 matt pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
680 1.1 matt &vmdata_l2pt[idx]);
681 1.42 skrll VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n",
682 1.1 matt __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
683 1.1 matt kernel_vm_base + idx * L2_S_SEGSIZE);
684 1.1 matt }
685 1.1 matt if (iovbase) {
686 1.1 matt pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt);
687 1.42 skrll VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n",
688 1.1 matt __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
689 1.1 matt iovbase & -L2_S_SEGSIZE);
690 1.1 matt }
691 1.1 matt
692 1.1 matt /* update the top of the kernel VM */
693 1.1 matt pmap_curmaxkvaddr =
694 1.1 matt kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);
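	// With KERNEL_L2PT_VMDATA_NUM == 8 this places pmap_curmaxkvaddr
	// 32MB above kernel_vm_base; kernel VM beyond that is grown later
	// by the pmap (pmap_growkernel) on demand.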
695 1.1 matt
696 1.46 skrll // This could be done earlier and then the kernel data and pages
697 1.46 skrll 	// allocated above would get merged (concatenated)
698 1.46 skrll
699 1.42 skrll VPRINTF("Mapping kernel\n");
700 1.1 matt
701 1.44 skrll extern char etext[];
702 1.1 matt size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
703 1.44 skrll size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart;
704 1.1 matt
705 1.1 matt textsize = (textsize + PGOFSET) & ~PGOFSET;
706 1.1 matt
707 1.1 matt /* start at offset of kernel in RAM */
708 1.1 matt
709 1.1 matt text.pv_pa = bmi->bmi_kernelstart;
710 1.44 skrll text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart);
711 1.1 matt text.pv_size = textsize;
712 1.40 skrll text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE;
713 1.1 matt text.pv_cache = PTE_CACHE;
714 1.1 matt
715 1.42 skrll VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
716 1.1 matt __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);
717 1.1 matt
718 1.1 matt add_pages(bmi, &text);
719 1.1 matt
720 1.1 matt data.pv_pa = text.pv_pa + textsize;
721 1.1 matt data.pv_va = text.pv_va + textsize;
722 1.1 matt data.pv_size = totalsize - textsize;
723 1.51 skrll data.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
724 1.1 matt data.pv_cache = PTE_CACHE;
725 1.1 matt
726 1.42 skrll VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
727 1.1 matt __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);
728 1.1 matt
729 1.1 matt add_pages(bmi, &data);
730 1.1 matt
731 1.42 skrll VPRINTF("Listing Chunks\n");
732 1.26 skrll
733 1.26 skrll pv_addr_t *lpv;
734 1.26 skrll SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
735 1.42 skrll VPRINTF("%s: pv %p: chunk VA %#lx..%#lx "
736 1.26 skrll "(PA %#lx, prot %d, cache %d)\n",
737 1.26 skrll __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
738 1.26 skrll lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
739 1.1 matt }
740 1.42 skrll VPRINTF("\nMapping Chunks\n");
741 1.1 matt
742 1.1 matt pv_addr_t cur_pv;
743 1.1 matt pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
744 1.1 matt if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
745 1.1 matt cur_pv = *pv;
746 1.35 matt KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va);
747 1.1 matt pv = SLIST_NEXT(pv, pv_list);
748 1.1 matt } else {
749 1.13 matt cur_pv.pv_va = KERNEL_BASE;
750 1.44 skrll cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va);
751 1.35 matt cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
752 1.1 matt cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
753 1.1 matt cur_pv.pv_cache = PTE_CACHE;
754 1.1 matt }
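	/*
	 * Walk the (VA-sorted) chunk list, concatenating adjacent chunks
	 * and, when mapping all of memory, bridging any gaps with plain
	 * read/write cacheable mappings, so that each pmap_map_chunk()
	 * call below covers as large a contiguous region as possible and
	 * can use section mappings where size and alignment allow.
	 */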
755 1.1 matt while (pv != NULL) {
756 1.1 matt if (mapallmem_p) {
757 1.1 matt if (concat_pvaddr(&cur_pv, pv)) {
758 1.1 matt pv = SLIST_NEXT(pv, pv_list);
759 1.1 matt continue;
760 1.1 matt }
761 1.1 matt if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
762 1.1 matt /*
763 1.1 matt 				 * See if we can extend the current pv to encompass the
764 1.1 matt * hole, and if so do it and retry the concatenation.
765 1.1 matt */
766 1.1 matt if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
767 1.1 matt && cur_pv.pv_cache == PTE_CACHE) {
768 1.1 matt 					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
769 1.1 matt continue;
770 1.1 matt }
771 1.1 matt
772 1.1 matt /*
773 1.1 matt 			 * We couldn't extend it, so emit the current chunk.
774 1.1 matt */
775 1.42 skrll VPRINTF("%s: mapping chunk VA %#lx..%#lx "
776 1.1 matt "(PA %#lx, prot %d, cache %d)\n",
777 1.1 matt __func__,
778 1.1 matt cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
779 1.1 matt cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
780 1.1 matt pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
781 1.1 matt cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
782 1.1 matt
783 1.1 matt /*
784 1.1 matt * set the current chunk to the hole and try again.
785 1.1 matt */
786 1.1 matt cur_pv.pv_pa += cur_pv.pv_size;
787 1.1 matt cur_pv.pv_va += cur_pv.pv_size;
788 1.1 matt 				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
789 1.1 matt cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
790 1.1 matt cur_pv.pv_cache = PTE_CACHE;
791 1.1 matt continue;
792 1.1 matt }
793 1.1 matt }
794 1.1 matt
795 1.1 matt /*
796 1.1 matt * The new pv didn't concatenate so emit the current one
797 1.1 matt * and use the new pv as the current pv.
798 1.1 matt */
799 1.42 skrll VPRINTF("%s: mapping chunk VA %#lx..%#lx "
800 1.1 matt "(PA %#lx, prot %d, cache %d)\n",
801 1.1 matt __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
802 1.1 matt cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
803 1.1 matt pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
804 1.1 matt cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
805 1.1 matt cur_pv = *pv;
806 1.1 matt pv = SLIST_NEXT(pv, pv_list);
807 1.1 matt }
808 1.1 matt
809 1.1 matt /*
810 1.1 matt * If we are mapping all of memory, let's map the rest of memory.
811 1.1 matt */
812 1.1 matt if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
813 1.1 matt if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
814 1.1 matt && cur_pv.pv_cache == PTE_CACHE) {
815 1.1 matt cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
816 1.1 matt } else {
817 1.34 matt KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base,
818 1.34 matt "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size,
819 1.34 matt kernel_vm_base);
820 1.42 skrll VPRINTF("%s: mapping chunk VA %#lx..%#lx "
821 1.1 matt "(PA %#lx, prot %d, cache %d)\n",
822 1.1 matt __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
823 1.1 matt cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
824 1.1 matt pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
825 1.1 matt cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
826 1.1 matt cur_pv.pv_pa += cur_pv.pv_size;
827 1.1 matt cur_pv.pv_va += cur_pv.pv_size;
828 1.1 matt cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
829 1.1 matt cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
830 1.1 matt cur_pv.pv_cache = PTE_CACHE;
831 1.1 matt }
832 1.1 matt }
833 1.1 matt
834 1.50 skrll /*
835 1.50 skrll * The amount we can direct map is limited by the start of the
836 1.50 skrll * virtual part of the kernel address space. Don't overrun
837 1.50 skrll * into it.
838 1.50 skrll */
839 1.34 matt if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) {
840 1.34 matt cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va;
841 1.34 matt }
842 1.34 matt
843 1.1 matt /*
844 1.1 matt * Now we map the final chunk.
845 1.1 matt */
846 1.42 skrll VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
847 1.1 matt __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
848 1.1 matt cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
849 1.1 matt pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
850 1.1 matt cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
851 1.1 matt
852 1.1 matt /*
853 1.1 matt * Now we map the stuff that isn't directly after the kernel
854 1.1 matt */
855 1.19 matt if (map_vectors_p) {
856 1.19 matt /* Map the vector page. */
857 1.19 matt pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
858 1.37 skrll VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
859 1.19 matt }
860 1.1 matt
861 1.36 skrll /* Map the Mini-Data cache clean area. */
862 1.1 matt #if ARM_MMU_XSCALE == 1
863 1.1 matt #if (ARM_NMMUS > 1)
864 1.1 matt if (xscale_use_minidata)
865 1.36 skrll #endif
866 1.30 kiyohara xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
867 1.36 skrll minidataclean.pv_pa);
868 1.1 matt #endif
869 1.1 matt
870 1.1 matt /*
871 1.1 matt * Map integrated peripherals at same address in first level page
872 1.1 matt * table so that we can continue to use console.
873 1.1 matt */
874 1.1 matt if (devmap)
875 1.1 matt pmap_devmap_bootstrap(l1pt_va, devmap);
876 1.1 matt
877 1.1 matt /* Tell the user about where all the bits and pieces live. */
878 1.42 skrll VPRINTF("%22s Physical Virtual Num\n", " ");
879 1.42 skrll VPRINTF("%22s Starting Ending Starting Ending Pages\n", " ");
880 1.1 matt
881 1.43 martin #ifdef VERBOSE_INIT_ARM
882 1.1 matt static const char mem_fmt[] =
883 1.1 matt "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
884 1.1 matt static const char mem_fmt_nov[] =
885 1.1 matt "%20s: 0x%08lx 0x%08lx %zu\n";
886 1.43 martin #endif
887 1.1 matt
888 1.46 skrll #if 0
889 1.46 skrll // XXX Doesn't make sense if kernel not at bottom of RAM
890 1.42 skrll VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
891 1.44 skrll KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1),
892 1.38 skrll (int)physmem);
893 1.46 skrll #endif
894 1.42 skrll VPRINTF(mem_fmt, "text section",
895 1.1 matt text.pv_pa, text.pv_pa + text.pv_size - 1,
896 1.1 matt text.pv_va, text.pv_va + text.pv_size - 1,
897 1.1 matt (int)(text.pv_size / PAGE_SIZE));
898 1.42 skrll VPRINTF(mem_fmt, "data section",
899 1.44 skrll KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata),
900 1.1 matt (vaddr_t)__data_start, (vaddr_t)_edata,
901 1.1 matt (int)((round_page((vaddr_t)_edata)
902 1.1 matt - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
903 1.42 skrll VPRINTF(mem_fmt, "bss section",
904 1.44 skrll KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__),
905 1.1 matt (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
906 1.1 matt (int)((round_page((vaddr_t)__bss_end__)
907 1.1 matt - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
908 1.42 skrll VPRINTF(mem_fmt, "L1 page directory",
909 1.1 matt kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
910 1.1 matt kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
911 1.1 matt L1_TABLE_SIZE / PAGE_SIZE);
912 1.42 skrll VPRINTF(mem_fmt, "ABT stack (CPU 0)",
913 1.7 skrll abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
914 1.7 skrll abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
915 1.7 skrll ABT_STACK_SIZE);
916 1.42 skrll VPRINTF(mem_fmt, "FIQ stack (CPU 0)",
917 1.1 matt fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
918 1.1 matt fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
919 1.1 matt FIQ_STACK_SIZE);
920 1.42 skrll VPRINTF(mem_fmt, "IRQ stack (CPU 0)",
921 1.1 matt irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
922 1.1 matt irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
923 1.1 matt IRQ_STACK_SIZE);
924 1.42 skrll VPRINTF(mem_fmt, "UND stack (CPU 0)",
925 1.1 matt undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
926 1.1 matt undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
927 1.1 matt UND_STACK_SIZE);
928 1.42 skrll VPRINTF(mem_fmt, "IDLE stack (CPU 0)",
929 1.1 matt idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
930 1.1 matt idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
931 1.1 matt UPAGES);
932 1.42 skrll VPRINTF(mem_fmt, "SVC stack",
933 1.1 matt kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
934 1.1 matt kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
935 1.1 matt UPAGES);
936 1.42 skrll VPRINTF(mem_fmt, "Message Buffer",
937 1.9 skrll msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
938 1.9 skrll msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
939 1.9 skrll (int)msgbuf_pgs);
940 1.19 matt if (map_vectors_p) {
941 1.42 skrll VPRINTF(mem_fmt, "Exception Vectors",
942 1.19 matt systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
943 1.19 matt systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
944 1.19 matt 1);
945 1.19 matt }
946 1.1 matt for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
947 1.1 matt pv = &bmi->bmi_freeblocks[i];
948 1.1 matt
949 1.42 skrll VPRINTF(mem_fmt_nov, "Free Memory",
950 1.1 matt pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
951 1.1 matt pv->pv_size / PAGE_SIZE);
952 1.1 matt }
953 1.1 matt /*
954 1.1 matt * Now we have the real page tables in place so we can switch to them.
955 1.1 matt * Once this is done we will be running with the REAL kernel page
956 1.1 matt * tables.
957 1.1 matt */
958 1.1 matt
959 1.42 skrll VPRINTF("TTBR0=%#x", armreg_ttbr_read());
960 1.2 matt #ifdef _ARM_ARCH_6
961 1.42 skrll VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
962 1.25 matt armreg_ttbr1_read(), armreg_ttbcr_read(),
963 1.25 matt armreg_contextidr_read());
964 1.2 matt #endif
965 1.42 skrll VPRINTF("\n");
966 1.2 matt
967 1.1 matt /* Switch tables */
968 1.46 skrll VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa);
969 1.46 skrll
970 1.46 skrll cpu_ttb = l1pt_pa;
971 1.46 skrll
972 1.46 skrll cpu_domains(DOMAIN_DEFAULT);
973 1.46 skrll
974 1.46 skrll cpu_idcache_wbinv_all();
975 1.46 skrll
976 1.46 skrll #ifdef __HAVE_GENERIC_START
977 1.1 matt
978 1.46 skrll /*
979 1.46 skrll * Turn on caches and set SCTLR/ACTLR
980 1.46 skrll */
981 1.46 skrll cpu_setup(boot_args);
982 1.23 matt #endif
983 1.46 skrll
984 1.42 skrll VPRINTF(" ttb");
985 1.46 skrll
986 1.17 matt #ifdef ARM_MMU_EXTENDED
987 1.23 matt /*
988 1.23 matt * TTBCR should have been initialized by the MD start code.
989 1.23 matt */
990 1.25 matt KASSERT((armreg_contextidr_read() & 0xff) == 0);
991 1.23 matt KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
992 1.24 matt /*
993 1.24 matt * Disable lookups via TTBR0 until there is an activated pmap.
994 1.24 matt */
995 1.24 matt armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
996 1.17 matt cpu_setttb(l1pt_pa, KERNEL_PID);
997 1.24 matt arm_isb();
998 1.17 matt #else
999 1.4 matt cpu_setttb(l1pt_pa, true);
1000 1.17 matt #endif
1001 1.46 skrll
1002 1.1 matt cpu_tlb_flushID();
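	/*
	 * From here on the MMU walks the newly built L1/L2 tables; the
	 * bootstrap translation table is no longer referenced and the
	 * flush above discards any stale TLB entries installed under it.
	 */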
1003 1.1 matt
1004 1.23 matt #ifdef ARM_MMU_EXTENDED
1005 1.46 skrll VPRINTF("\nsctlr=%#x actlr=%#x\n",
1006 1.46 skrll armreg_sctlr_read(), armreg_auxctl_read());
1007 1.23 matt #else
1008 1.42 skrll VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read());
1009 1.25 matt #endif
1010 1.25 matt
1011 1.25 matt #ifdef MULTIPROCESSOR
1012 1.46 skrll #ifndef __HAVE_GENERIC_START
1013 1.25 matt /*
1014 1.25 matt 	 * Kick the secondaries to load the TTB, after which they'll go
1015 1.25 matt 	 * back to sleep and wait for the final kick before they hatch.
1016 1.25 matt */
1017 1.42 skrll VPRINTF(" hatchlings");
1018 1.25 matt cpu_boot_secondary_processors();
1019 1.25 matt #endif
1020 1.46 skrll #endif
1021 1.25 matt
1022 1.42 skrll VPRINTF(" OK\n");
1023 1.1 matt }
1024