/*	$NetBSD: mm.c,v 1.7 2017/10/29 11:38:43 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "prekern.h"

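/*
 * Protection bits applied to a PTE for each MM_PROT_* level. An RWX
 * mapping is deliberately not available: a page is executable only if
 * it is read-only.
 */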
static const pt_entry_t protection_codes[3] = {
	[MM_PROT_READ] = PG_RO | PG_NX,
	[MM_PROT_WRITE] = PG_RW | PG_NX,
	[MM_PROT_EXECUTE] = PG_RO,
	/* RWX does not exist */
};

struct bootspace bootspace;

extern paddr_t kernpa_start, kernpa_end;
vaddr_t iom_base;

paddr_t pa_avail = 0;
static const vaddr_t tmpva = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);

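/*
 * Initialize the physical page allocator. Pages are handed out linearly
 * (bump-style), starting at first_pa.
 */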
void
mm_init(paddr_t first_pa)
{
	pa_avail = first_pa;
}

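/*
 * Map the physical page 'pa' at the virtual address 'va', by writing the
 * corresponding L1 entry through the recursive PTE window. The caller is
 * responsible for flushing the TLB.
 */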
static void
mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
{
	PTE_BASE[pl1_i(va)] = pa | PG_V | protection_codes[prot];
}

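/* Invalidate the TLB entry of the given virtual address. */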
static void
mm_flush_va(vaddr_t va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

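/*
 * Allocate 'npages' contiguous physical pages, and zero them out by
 * temporarily mapping each of them at 'tmpva'.
 */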
static paddr_t
mm_palloc(size_t npages)
{
	paddr_t pa;
	size_t i;

	/* Allocate the physical pages */
	pa = pa_avail;
	pa_avail += npages * PAGE_SIZE;

	/* Zero them out */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(pa + i * PAGE_SIZE, tmpva,
		    MM_PROT_READ|MM_PROT_WRITE);
		mm_flush_va(tmpva);
		memset((void *)tmpva, 0, PAGE_SIZE);
	}

	return pa;
}

static bool
mm_pte_is_valid(pt_entry_t pte)
{
	return ((pte & PG_V) != 0);
}

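/* Return the physical address backing the given virtual address. */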
paddr_t
mm_vatopa(vaddr_t va)
{
	return (PTE_BASE[pl1_i(va)] & PG_FRAME);
}

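/*
 * Change the protection of an already mapped area, by re-entering each
 * page with the new protection and flushing its TLB entry.
 */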
void
mm_mprotect(vaddr_t startva, size_t size, int prot)
{
	size_t i, npages;
	vaddr_t va;
	paddr_t pa;

	ASSERT(size % PAGE_SIZE == 0);
	npages = size / PAGE_SIZE;

	for (i = 0; i < npages; i++) {
		va = startva + i * PAGE_SIZE;
		pa = (PTE_BASE[pl1_i(va)] & PG_FRAME);
		mm_enter_pa(pa, va, prot);
		mm_flush_va(va);
	}
}

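/*
 * Compute the number of page-tree entries of size 'pgsz' needed to map
 * the [startva, endva] range. For example, a 2MB range that straddles an
 * L2 boundary (NBPD_L2 = 2MB) needs two L2 entries.
 */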
static size_t
mm_nentries_range(vaddr_t startva, vaddr_t endva, size_t pgsz)
{
	size_t npages;

	npages = roundup((endva / PAGE_SIZE), (pgsz / PAGE_SIZE)) -
	    rounddown((startva / PAGE_SIZE), (pgsz / PAGE_SIZE));
	return (npages / (pgsz / PAGE_SIZE));
}

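/*
 * Build the page tree of the [startva, endva] range: allocate and hook
 * up the missing L4, L3 and L2 pages. The L1 entries themselves are
 * filled later, by mm_enter_pa.
 */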
static void
mm_map_tree(vaddr_t startva, vaddr_t endva)
{
	size_t i, nL4e, nL3e, nL2e;
	size_t L4e_idx, L3e_idx, L2e_idx;
	paddr_t pa;

	/*
	 * Build L4.
	 */
	L4e_idx = pl4_i(startva);
	nL4e = mm_nentries_range(startva, endva, NBPD_L4);
	ASSERT(L4e_idx == 511);
	ASSERT(nL4e == 1);
	if (!mm_pte_is_valid(L4_BASE[L4e_idx])) {
		pa = mm_palloc(1);
		L4_BASE[L4e_idx] = pa | PG_V | PG_RW;
	}

	/*
	 * Build L3.
	 */
	L3e_idx = pl3_i(startva);
	nL3e = mm_nentries_range(startva, endva, NBPD_L3);
	for (i = 0; i < nL3e; i++) {
		if (mm_pte_is_valid(L3_BASE[L3e_idx+i])) {
			continue;
		}
		pa = mm_palloc(1);
		L3_BASE[L3e_idx+i] = pa | PG_V | PG_RW;
	}

	/*
	 * Build L2.
	 */
	L2e_idx = pl2_i(startva);
	nL2e = mm_nentries_range(startva, endva, NBPD_L2);
	for (i = 0; i < nL2e; i++) {
		if (mm_pte_is_valid(L2_BASE[L2e_idx+i])) {
			continue;
		}
		pa = mm_palloc(1);
		L2_BASE[L2e_idx+i] = pa | PG_V | PG_RW;
	}
}

static uint64_t
mm_rand_num64(void)
{
	/* XXX: yes, this is ridiculous, will be fixed soon */
	return rdtsc();
}

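/*
 * Map the "head" region (ELF Header and ELF Section Headers) at a random
 * VA inside the head window, and build the ELF info from it.
 */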
static void
mm_map_head(void)
{
	size_t i, npages, size;
	uint64_t rnd;
	vaddr_t randva;

	/*
	 * To get the size of the head, we take a look at the read-only
	 * mapping of the kernel we created in locore. We're identity-mapped,
	 * so kernpa = kernva.
	 */
	size = elf_get_head_size((vaddr_t)kernpa_start);
	npages = size / PAGE_SIZE;

	rnd = mm_rand_num64();
	randva = rounddown(HEAD_WINDOW_BASE + rnd % (HEAD_WINDOW_SIZE - size),
	    PAGE_SIZE);
	mm_map_tree(randva, randva + size);

	/* Enter the area and build the ELF info */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(kernpa_start + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}
	elf_build_head(randva);

	/* Register the values in bootspace */
	bootspace.head.va = randva;
	bootspace.head.pa = kernpa_start;
	bootspace.head.sz = size;
}

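/*
 * Pick a random VA for a kernel region of the given size inside the KASLR
 * window, making sure it does not collide with the regions chosen before,
 * and build the page tree of the resulting range.
 */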
static vaddr_t
mm_randva_kregion(size_t size)
{
	static struct {
		vaddr_t sva;
		vaddr_t eva;
	} regions[4];
	static size_t idx = 0;
	vaddr_t randva;
	uint64_t rnd;
	size_t i;
	bool ok;

	ASSERT(idx < 4);

	while (1) {
		rnd = mm_rand_num64();
		randva = rounddown(KASLR_WINDOW_BASE +
		    rnd % (KASLR_WINDOW_SIZE - size), PAGE_SIZE);

		/* Detect collisions */
		ok = true;
		for (i = 0; i < idx; i++) {
			if ((regions[i].sva <= randva) &&
			    (randva < regions[i].eva)) {
				ok = false;
				break;
			}
			if ((regions[i].sva < randva + size) &&
			    (randva + size <= regions[i].eva)) {
				ok = false;
				break;
			}
		}
		if (ok) {
			break;
		}
	}

	/* Record the chosen region, for the next collision checks */
	regions[idx].sva = randva;
	regions[idx].eva = randva + size;
	idx++;

	mm_map_tree(randva, randva + size);

	return randva;
}

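/*
 * Map the text, rodata and data segments of the kernel at independent
 * random VAs, and build the ELF info of each.
 */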
static void
mm_map_segments(void)
{
	size_t i, npages, size;
	vaddr_t randva;
	paddr_t pa;

	/*
	 * Kernel text segment.
	 */
	elf_get_text(&pa, &size);
	randva = mm_randva_kregion(size);
	npages = size / PAGE_SIZE;

	/* Enter the area and build the ELF info */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(pa + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}
	elf_build_text(randva, pa, size);

	/* Register the values in bootspace */
	bootspace.text.va = randva;
	bootspace.text.pa = pa;
	bootspace.text.sz = size;

	/*
	 * Kernel rodata segment.
	 */
	elf_get_rodata(&pa, &size);
	randva = mm_randva_kregion(size);
	npages = size / PAGE_SIZE;

	/* Enter the area and build the ELF info */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(pa + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}
	elf_build_rodata(randva, pa, size);

	/* Register the values in bootspace */
	bootspace.rodata.va = randva;
	bootspace.rodata.pa = pa;
	bootspace.rodata.sz = size;

	/*
	 * Kernel data segment.
	 */
	elf_get_data(&pa, &size);
	randva = mm_randva_kregion(size);
	npages = size / PAGE_SIZE;

	/* Enter the area and build the ELF info */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(pa + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}
	elf_build_data(randva, pa, size);

	/* Register the values in bootspace */
	bootspace.data.va = randva;
	bootspace.data.pa = pa;
	bootspace.data.sz = size;
}

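/*
 * Map the "boot" region, i.e. everything that remains beyond the data
 * segment (up to pa_avail), plus the ISA I/O memory, at a random VA.
 */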
static void
mm_map_boot(void)
{
	size_t i, npages, size;
	vaddr_t randva;
	paddr_t bootpa;

	/*
	 * The "boot" region is special: its page tree has a fixed size, but
	 * the number of pages entered is lower.
	 */

	/* Create the page tree */
	size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;
	randva = mm_randva_kregion(size);

	/* Enter the area and build the ELF info */
	bootpa = bootspace.data.pa + bootspace.data.sz;
	size = (pa_avail - bootpa);
	npages = size / PAGE_SIZE;
	for (i = 0; i < npages; i++) {
		mm_enter_pa(bootpa + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}
	elf_build_boot(randva, bootpa);

	/* Enter the ISA I/O MEM */
	iom_base = randva + npages * PAGE_SIZE;
	npages = IOM_SIZE / PAGE_SIZE;
	for (i = 0; i < npages; i++) {
		mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
		    iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	/* Register the values in bootspace */
	bootspace.boot.va = randva;
	bootspace.boot.pa = bootpa;
	bootspace.boot.sz = (size_t)(iom_base + IOM_SIZE) -
	    (size_t)bootspace.boot.va;

	/* Initialize the values that are located in the "boot" region */
	extern uint64_t PDPpaddr;
	bootspace.spareva = bootspace.boot.va + NKL2_KIMG_ENTRIES * NBPD_L2;
	bootspace.pdir = bootspace.boot.va + (PDPpaddr - bootspace.boot.pa);
	bootspace.emodule = bootspace.boot.va + NKL2_KIMG_ENTRIES * NBPD_L2;
}

/*
 * There are five independent regions: head, text, rodata, data, boot. They
 * are all mapped at random VAs.
 *
 * The head contains the ELF Header and the ELF Section Headers, and we use
 * them to map the rest of the regions. The head must be placed in memory
 * *before* the other regions.
 *
 * At the end of this function, the bootspace structure is fully constructed.
 */
void
mm_map_kernel(void)
{
	memset(&bootspace, 0, sizeof(bootspace));
	mm_map_head();
	print_state(true, "Head region mapped");
	mm_map_segments();
	print_state(true, "Segments mapped");
	mm_map_boot();
	print_state(true, "Boot region mapped");
}