/*	$NetBSD: mm.c,v 1.1 2017/10/10 09:29:14 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30 1.1 maxv
31 1.1 maxv #include "prekern.h"
32 1.1 maxv
/*
 * PTE protection bits for each MM_PROT_* value, indexed by protection
 * type.  Readable and writable mappings are made non-executable via
 * PG_NX; executable mappings are read-only.  No RWX combination is
 * provided (W^X: a page is never both writable and executable).
 */
static const pt_entry_t protection_codes[3] = {
	[MM_PROT_READ] = PG_RO | PG_NX,
	[MM_PROT_WRITE] = PG_RW | PG_NX,
	[MM_PROT_EXECUTE] = PG_RO,
	/* RWX does not exist */
};
39 1.1 maxv
/* Physical bounds of the loaded kernel image (defined elsewhere). */
extern paddr_t kernpa_start, kernpa_end;

/* Base VA of the ISA I/O memory mapping; set by mm_map_kernel(). */
vaddr_t iom_base;

/* Next free physical address handed out by mm_palloc(). */
paddr_t pa_avail = 0;

/*
 * Bump-allocated VA window used for the prekern's temporary mappings:
 * one L2 entry's worth of VA just past the kernel image slots.
 */
static vaddr_t va_avail = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);
static vaddr_t va_end = (PREKERNBASE + (NKL2_KIMG_ENTRIES + 1) * NBPD_L2);
46 1.1 maxv
/*
 * Initialize the linear physical page allocator: mm_palloc() will hand
 * out physical pages starting at first_pa.
 */
void
mm_init(paddr_t first_pa)
{
	pa_avail = first_pa;
}
52 1.1 maxv
53 1.1 maxv static paddr_t
54 1.1 maxv mm_palloc(size_t npages)
55 1.1 maxv {
56 1.1 maxv paddr_t pa = pa_avail;
57 1.1 maxv pa_avail += npages * PAGE_SIZE;
58 1.1 maxv return pa;
59 1.1 maxv }
60 1.1 maxv
61 1.1 maxv static vaddr_t
62 1.1 maxv mm_valloc(size_t npages)
63 1.1 maxv {
64 1.1 maxv vaddr_t va = va_avail;
65 1.1 maxv va_avail += npages * PAGE_SIZE;
66 1.1 maxv if (va_avail > va_end) {
67 1.1 maxv fatal("mm_valloc: no VA left");
68 1.1 maxv }
69 1.1 maxv return va;
70 1.1 maxv }
71 1.1 maxv
/*
 * Map physical page 'pa' at virtual address 'va' with protection 'prot'
 * (an MM_PROT_* value), by writing the L1 PTE directly through the
 * recursive PTE_BASE mapping.  The TLB is not flushed here; callers use
 * mm_flush_va() when the VA may already be cached.
 */
static void
mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
{
	PTE_BASE[pl1_i(va)] = pa | PG_V | protection_codes[prot];
}
77 1.1 maxv
/*
 * Invalidate the TLB entry for 'va' on the current CPU.  The "memory"
 * clobber orders the invlpg against surrounding PTE updates.
 */
static void
mm_flush_va(vaddr_t va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}
83 1.1 maxv
/*
 * Translate virtual address 'va' to the physical address of its frame,
 * by reading the L1 PTE.  Assumes 'va' is mapped; the offset within the
 * page is not preserved (only the PG_FRAME bits are returned).
 */
paddr_t
mm_vatopa(vaddr_t va)
{
	return (PTE_BASE[pl1_i(va)] & PG_FRAME);
}
89 1.1 maxv
90 1.1 maxv void
91 1.1 maxv mm_mprotect(vaddr_t startva, size_t size, int prot)
92 1.1 maxv {
93 1.1 maxv size_t i, npages;
94 1.1 maxv vaddr_t va;
95 1.1 maxv paddr_t pa;
96 1.1 maxv
97 1.1 maxv ASSERT(size % PAGE_SIZE == 0);
98 1.1 maxv npages = size / PAGE_SIZE;
99 1.1 maxv
100 1.1 maxv for (i = 0; i < npages; i++) {
101 1.1 maxv va = startva + i * PAGE_SIZE;
102 1.1 maxv pa = (PTE_BASE[pl1_i(va)] & PG_FRAME);
103 1.1 maxv mm_enter_pa(pa, va, prot);
104 1.1 maxv mm_flush_va(va);
105 1.1 maxv }
106 1.1 maxv }
107 1.1 maxv
/*
 * Build and install the page-table sub-tree needed to map the VA range
 * [startva, endva): allocate fresh L3/L2/L1 pages, zero the L3/L2
 * pages, link L3 -> L2 -> L1, then hook the sub-tree into the live L4.
 * The L1 pages are left zeroed-by-allocation and are filled later by
 * mm_enter_pa() calls.
 *
 * NOTE(review): 'pdir' is declared pd_entry_t * but assigned through
 * pt_entry_t * casts; presumably the two types are identical on amd64 —
 * confirm against the machine headers.
 */
static void
mm_map_va(vaddr_t startva, vaddr_t endva)
{
	size_t i, idx, size, nL4e, nL3e, nL2e;
	size_t L4e_idx, L3e_idx, L2e_idx;
	vaddr_t L3page_va, L2page_va;
	paddr_t L3page_pa, L2page_pa, L1page_pa;
	pd_entry_t *pdir;

	/*
	 * Initialize constants: how many entries of each level the range
	 * needs, and the starting index at each level.
	 */
	size = endva - startva;
	nL4e = roundup(size, NBPD_L4) / NBPD_L4;
	nL3e = roundup(size, NBPD_L3) / NBPD_L3;
	nL2e = roundup(size, NBPD_L2) / NBPD_L2;
	L4e_idx = pl4_i(startva);
	/* Indices are taken relative to the enclosing upper-level entry. */
	L3e_idx = pl3_i(startva % NBPD_L4);
	L2e_idx = pl2_i(startva % NBPD_L3);

	/*
	 * Map the sub-tree itself: allocate the page-table pages, and map
	 * the L3/L2 ones read-write so they can be zeroed and filled.
	 */
	L3page_va = mm_valloc(nL4e);
	L3page_pa = mm_palloc(nL4e);
	L2page_va = mm_valloc(nL3e);
	L2page_pa = mm_palloc(nL3e);

	/* The L1 pages are only linked, never written through a VA here. */
	L1page_pa = mm_palloc(nL2e);

	for (i = 0; i < nL4e; i++) {
		mm_enter_pa(L3page_pa + i * PAGE_SIZE,
		    L3page_va + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
		memset((void *)(L3page_va + i * PAGE_SIZE), 0, PAGE_SIZE);
	}

	for (i = 0; i < nL3e; i++) {
		mm_enter_pa(L2page_pa + i * PAGE_SIZE,
		    L2page_va + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
		memset((void *)(L2page_va + i * PAGE_SIZE), 0, PAGE_SIZE);
	}

	/*
	 * Now link the levels together: each L3 entry points at an L2
	 * page, each L2 entry at an L1 page.
	 */
	pdir = (pt_entry_t *)L3page_va;
	for (i = 0, idx = L3e_idx; i < nL3e; i++, idx++) {
		pdir[idx] = (L2page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
	}

	pdir = (pt_entry_t *)L2page_va;
	for (i = 0, idx = L2e_idx; i < nL2e; i++, idx++) {
		pdir[idx] = (L1page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
	}

	/*
	 * Finally, link the sub-tree into the live tree, making the new
	 * range reachable from the current L4.
	 */
	L4_BASE[L4e_idx] = L3page_pa | PG_V | PG_RW;
}
168 1.1 maxv
169 1.1 maxv /*
170 1.1 maxv * Select a random VA, and create a page tree. The size of this tree is
171 1.1 maxv * actually hard-coded, and matches the one created by the generic NetBSD
172 1.1 maxv * locore.
173 1.1 maxv */
174 1.1 maxv static vaddr_t
175 1.1 maxv mm_rand_base()
176 1.1 maxv {
177 1.1 maxv vaddr_t randva;
178 1.1 maxv uint64_t rnd;
179 1.1 maxv size_t size;
180 1.1 maxv
181 1.1 maxv size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;
182 1.1 maxv
183 1.1 maxv /* yes, this is ridiculous */
184 1.1 maxv rnd = rdtsc();
185 1.1 maxv randva = rounddown(KASLR_WINDOW_BASE + rnd % (KASLR_WINDOW_SIZE - size),
186 1.1 maxv PAGE_SIZE);
187 1.1 maxv
188 1.1 maxv mm_map_va(randva, randva + size);
189 1.1 maxv
190 1.1 maxv return randva;
191 1.1 maxv }
192 1.1 maxv
193 1.1 maxv /*
194 1.1 maxv * Virtual address space of the kernel:
195 1.1 maxv * +---------------+---------------------+------------------+-------------+
196 1.1 maxv * | KERNEL + SYMS | [PRELOADED MODULES] | BOOTSTRAP TABLES | ISA I/O MEM |
197 1.1 maxv * +---------------+---------------------+------------------+-------------+
198 1.1 maxv * We basically choose a random VA, and map everything contiguously starting
199 1.1 maxv * from there. Note that the physical pages allocated by mm_palloc are part
200 1.1 maxv * of the BOOTSTRAP TABLES.
201 1.1 maxv */
202 1.1 maxv vaddr_t
203 1.1 maxv mm_map_kernel()
204 1.1 maxv {
205 1.1 maxv size_t i, npages, size;
206 1.1 maxv vaddr_t baseva;
207 1.1 maxv
208 1.1 maxv size = (pa_avail - kernpa_start);
209 1.1 maxv baseva = mm_rand_base();
210 1.1 maxv npages = size / PAGE_SIZE;
211 1.1 maxv
212 1.1 maxv /* Enter the whole area linearly */
213 1.1 maxv for (i = 0; i < npages; i++) {
214 1.1 maxv mm_enter_pa(kernpa_start + i * PAGE_SIZE,
215 1.1 maxv baseva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
216 1.1 maxv }
217 1.1 maxv
218 1.1 maxv /* Enter the ISA I/O MEM */
219 1.1 maxv iom_base = baseva + npages * PAGE_SIZE;
220 1.1 maxv npages = IOM_SIZE / PAGE_SIZE;
221 1.1 maxv for (i = 0; i < npages; i++) {
222 1.1 maxv mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
223 1.1 maxv iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
224 1.1 maxv }
225 1.1 maxv
226 1.1 maxv return baseva;
227 1.1 maxv }
228