/*	$NetBSD: mm.c,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "prekern.h"

static const pt_entry_t protection_codes[3] = {
	[MM_PROT_READ] = PG_RO | PG_NX,
	[MM_PROT_WRITE] = PG_RW | PG_NX,
	[MM_PROT_EXECUTE] = PG_RO,
	/* RWX does not exist */
};
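
/*
 * Note that the table above encodes W^X at the PTE level: writable mappings
 * are always non-executable (PG_NX set), and executable mappings are
 * read-only (PG_RO). No protection code yields a writable+executable page.
 */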

extern paddr_t kernpa_start, kernpa_end;
vaddr_t iom_base;

/*
 * Bump allocators for bootstrap physical and virtual pages. pa_avail is
 * set by mm_init; va_avail covers a single L2 slot (NBPD_L2 bytes) right
 * after the image mapping, so the bootstrap VA is limited to that window.
 */
paddr_t pa_avail = 0;
static vaddr_t va_avail = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);
static vaddr_t va_end = (PREKERNBASE + (NKL2_KIMG_ENTRIES + 1) * NBPD_L2);

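/*
 * Initialize the physical page allocator: pages are handed out from
 * first_pa upwards.
 */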
void
mm_init(paddr_t first_pa)
{
	pa_avail = first_pa;
}

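/*
 * Bump-allocate npages physical pages. There is no end-of-memory check
 * here; the pages above first_pa are presumably guaranteed to be free.
 */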
static paddr_t
mm_palloc(size_t npages)
{
	paddr_t pa = pa_avail;
	pa_avail += npages * PAGE_SIZE;
	return pa;
}

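/*
 * Bump-allocate npages virtual pages from the bootstrap VA window, and
 * fail hard if the window is exhausted.
 */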
static vaddr_t
mm_valloc(size_t npages)
{
	vaddr_t va = va_avail;
	va_avail += npages * PAGE_SIZE;
	if (va_avail > va_end) {
		fatal("mm_valloc: no VA left");
	}
	return va;
}

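/*
 * Enter a single page into the page tables, by writing the L1 entry for
 * va through the recursive slot (PTE_BASE). The page-table pages backing
 * va must already exist. The TLB is not flushed here.
 */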
static void
mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
{
	PTE_BASE[pl1_i(va)] = pa | PG_V | protection_codes[prot];
}

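/*
 * Flush the TLB entry of a single VA, with invlpg.
 */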
static void
mm_flush_va(vaddr_t va)
{
	asm volatile("invlpg (%0)" :: "r" (va) : "memory");
}

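/*
 * Translate a VA into a PA by reading its L1 entry. Only the physical
 * frame is returned: va is expected to be page-aligned, and its mapping
 * to be present.
 */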
paddr_t
mm_vatopa(vaddr_t va)
{
	return (PTE_BASE[pl1_i(va)] & PG_FRAME);
}

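/*
 * Change the protection of [startva, startva + size), by re-entering each
 * page with the new protection and flushing its TLB entry. size must be a
 * multiple of PAGE_SIZE.
 */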
void
mm_mprotect(vaddr_t startva, size_t size, int prot)
{
	size_t i, npages;
	vaddr_t va;
	paddr_t pa;

	ASSERT(size % PAGE_SIZE == 0);
	npages = size / PAGE_SIZE;

	for (i = 0; i < npages; i++) {
		va = startva + i * PAGE_SIZE;
		pa = (PTE_BASE[pl1_i(va)] & PG_FRAME);
		mm_enter_pa(pa, va, prot);
		mm_flush_va(va);
	}
}

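/*
 * Create the page-table pages needed to back [startva, endva), and link
 * them into the existing L4 page. Once this is done, mm_enter_pa can
 * populate L1 entries for any page in the range.
 */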
static void
mm_map_va(vaddr_t startva, vaddr_t endva)
{
	size_t i, idx, size, nL4e, nL3e, nL2e;
	size_t L4e_idx, L3e_idx, L2e_idx;
	vaddr_t L3page_va, L2page_va;
	paddr_t L3page_pa, L2page_pa, L1page_pa;
	pd_entry_t *pdir;

	/*
	 * Initialize constants: the number of entries needed at each level
	 * to cover the range, and the starting index of the range within
	 * the first page of each level (the offset of startva inside its
	 * L4 and L3 slots, converted to an entry index).
	 */
	size = endva - startva;
	nL4e = roundup(size, NBPD_L4) / NBPD_L4;
	nL3e = roundup(size, NBPD_L3) / NBPD_L3;
	nL2e = roundup(size, NBPD_L2) / NBPD_L2;
	L4e_idx = pl4_i(startva);
	L3e_idx = pl3_i(startva % NBPD_L4);
	L2e_idx = pl2_i(startva % NBPD_L3);

	/*
	 * Map the sub-tree itself: allocate physical pages for the L3, L2
	 * and L1 levels, and zero the L3/L2 pages through temporary VA
	 * mappings. The L1 pages are populated later, via mm_enter_pa.
	 */
	L3page_va = mm_valloc(nL4e);
	L3page_pa = mm_palloc(nL4e);
	L2page_va = mm_valloc(nL3e);
	L2page_pa = mm_palloc(nL3e);

	L1page_pa = mm_palloc(nL2e);

	for (i = 0; i < nL4e; i++) {
		mm_enter_pa(L3page_pa + i * PAGE_SIZE,
		    L3page_va + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
		memset((void *)(L3page_va + i * PAGE_SIZE), 0, PAGE_SIZE);
	}

	for (i = 0; i < nL3e; i++) {
		mm_enter_pa(L2page_pa + i * PAGE_SIZE,
		    L2page_va + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
		memset((void *)(L2page_va + i * PAGE_SIZE), 0, PAGE_SIZE);
	}

	/*
	 * Now link the levels together.
	 */
	pdir = (pd_entry_t *)L3page_va;
	for (i = 0, idx = L3e_idx; i < nL3e; i++, idx++) {
		pdir[idx] = (L2page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
	}

	pdir = (pd_entry_t *)L2page_va;
	for (i = 0, idx = L2e_idx; i < nL2e; i++, idx++) {
		pdir[idx] = (L1page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
	}

	/*
	 * Finally, link the sub-tree into the tree.
	 */
	L4_BASE[L4e_idx] = L3page_pa | PG_V | PG_RW;
}
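
/*
 * Worked example (a sketch): for the tree created by mm_rand_base below,
 * size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2. Assuming this is at most
 * NBPD_L3 (1GB), we get nL4e = nL3e = 1 and nL2e = NKL2_KIMG_ENTRIES + 1:
 * one L3 page, one L2 page, and NKL2_KIMG_ENTRIES+1 L1 pages. Note that
 * the counts are derived from size alone; they assume startva is aligned
 * well enough that the range does not straddle an extra slot at any level.
 */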

/*
 * Select a random VA, and create a page tree there. The size of this tree
 * is actually hard-coded, and matches the one created by the generic NetBSD
 * locore.
 */
static vaddr_t
mm_rand_base(void)
{
	vaddr_t randva;
	uint64_t rnd;
	size_t size;

	size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;

	/* yes, this is ridiculous: the TSC is not a real entropy source */
	rnd = rdtsc();
	randva = rounddown(KASLR_WINDOW_BASE + rnd % (KASLR_WINDOW_SIZE - size),
	    PAGE_SIZE);

	mm_map_va(randva, randva + size);

	return randva;
}

/*
 * Virtual address space of the kernel:
 * +---------------+---------------------+------------------+-------------+
 * | KERNEL + SYMS | [PRELOADED MODULES] | BOOTSTRAP TABLES | ISA I/O MEM |
 * +---------------+---------------------+------------------+-------------+
 * We basically choose a random VA, and map everything contiguously starting
 * from there. Note that the physical pages allocated by mm_palloc are part
 * of the BOOTSTRAP TABLES.
 */
vaddr_t
mm_map_kernel(void)
{
	size_t i, npages, size;
	vaddr_t baseva;

	size = (pa_avail - kernpa_start);
	baseva = mm_rand_base();
	npages = size / PAGE_SIZE;

	/* Enter the whole area linearly */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(kernpa_start + i * PAGE_SIZE,
		    baseva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	/* Enter the ISA I/O MEM */
	iom_base = baseva + npages * PAGE_SIZE;
	npages = IOM_SIZE / PAGE_SIZE;
	for (i = 0; i < npages; i++) {
		mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
		    iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	return baseva;
}
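
/*
 * Usage sketch, for illustration only. The actual caller lives elsewhere
 * in the prekern, and the names below are assumptions:
 *
 *	mm_init(first_free_pa);		// first free PA above the image
 *	kernva = mm_map_kernel();	// KASLR'd VA of the kernel
 *	// ... relocate the kernel at kernva, apply mm_mprotect on its
 *	// segments, and jump to its entry point ...
 */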