/*	$NetBSD: mm.c,v 1.3 2017/10/18 17:12:42 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30 1.1 maxv
31 1.1 maxv #include "prekern.h"
32 1.1 maxv
33 1.1 maxv static const pt_entry_t protection_codes[3] = {
34 1.1 maxv [MM_PROT_READ] = PG_RO | PG_NX,
35 1.1 maxv [MM_PROT_WRITE] = PG_RW | PG_NX,
36 1.1 maxv [MM_PROT_EXECUTE] = PG_RO,
37 1.1 maxv /* RWX does not exist */
38 1.1 maxv };
39 1.1 maxv
40 1.1 maxv extern paddr_t kernpa_start, kernpa_end;
41 1.1 maxv vaddr_t iom_base;
42 1.1 maxv
43 1.1 maxv paddr_t pa_avail = 0;
44 1.2 maxv static const vaddr_t tmpva = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);
45 1.1 maxv
46 1.1 maxv void
47 1.1 maxv mm_init(paddr_t first_pa)
48 1.1 maxv {
49 1.1 maxv pa_avail = first_pa;
50 1.1 maxv }
51 1.1 maxv
52 1.1 maxv static void
53 1.1 maxv mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
54 1.1 maxv {
55 1.1 maxv PTE_BASE[pl1_i(va)] = pa | PG_V | protection_codes[prot];
56 1.1 maxv }
57 1.1 maxv
58 1.1 maxv static void
59 1.1 maxv mm_flush_va(vaddr_t va)
60 1.1 maxv {
61 1.1 maxv asm volatile("invlpg (%0)" ::"r" (va) : "memory");
62 1.1 maxv }
63 1.1 maxv
64 1.2 maxv static paddr_t
65 1.2 maxv mm_palloc(size_t npages)
66 1.2 maxv {
67 1.2 maxv paddr_t pa;
68 1.2 maxv size_t i;
69 1.2 maxv
70 1.2 maxv /* Allocate the physical pages */
71 1.2 maxv pa = pa_avail;
72 1.2 maxv pa_avail += npages * PAGE_SIZE;
73 1.2 maxv
74 1.2 maxv /* Zero them out */
75 1.2 maxv for (i = 0; i < npages; i++) {
76 1.2 maxv mm_enter_pa(pa + i * PAGE_SIZE, tmpva,
77 1.2 maxv MM_PROT_READ|MM_PROT_WRITE);
78 1.2 maxv mm_flush_va(tmpva);
79 1.2 maxv memset((void *)tmpva, 0, PAGE_SIZE);
80 1.2 maxv }
81 1.2 maxv
82 1.2 maxv return pa;
83 1.2 maxv }
84 1.2 maxv
85 1.3 maxv static bool
86 1.3 maxv mm_pte_is_valid(pt_entry_t pte)
87 1.3 maxv {
88 1.3 maxv return ((pte & PG_V) != 0);
89 1.3 maxv }
90 1.3 maxv
91 1.1 maxv paddr_t
92 1.1 maxv mm_vatopa(vaddr_t va)
93 1.1 maxv {
94 1.1 maxv return (PTE_BASE[pl1_i(va)] & PG_FRAME);
95 1.1 maxv }
96 1.1 maxv
97 1.1 maxv void
98 1.1 maxv mm_mprotect(vaddr_t startva, size_t size, int prot)
99 1.1 maxv {
100 1.1 maxv size_t i, npages;
101 1.1 maxv vaddr_t va;
102 1.1 maxv paddr_t pa;
103 1.1 maxv
104 1.1 maxv ASSERT(size % PAGE_SIZE == 0);
105 1.1 maxv npages = size / PAGE_SIZE;
106 1.1 maxv
107 1.1 maxv for (i = 0; i < npages; i++) {
108 1.1 maxv va = startva + i * PAGE_SIZE;
109 1.1 maxv pa = (PTE_BASE[pl1_i(va)] & PG_FRAME);
110 1.1 maxv mm_enter_pa(pa, va, prot);
111 1.1 maxv mm_flush_va(va);
112 1.1 maxv }
113 1.1 maxv }
114 1.1 maxv
115 1.1 maxv static void
116 1.2 maxv mm_map_tree(vaddr_t startva, vaddr_t endva)
117 1.1 maxv {
118 1.2 maxv size_t i, size, nL4e, nL3e, nL2e;
119 1.1 maxv size_t L4e_idx, L3e_idx, L2e_idx;
120 1.3 maxv paddr_t pa;
121 1.3 maxv
122 1.3 maxv size = endva - startva;
123 1.1 maxv
124 1.1 maxv /*
125 1.3 maxv * Build L4.
126 1.1 maxv */
127 1.3 maxv L4e_idx = pl4_i(startva);
128 1.1 maxv nL4e = roundup(size, NBPD_L4) / NBPD_L4;
129 1.3 maxv ASSERT(L4e_idx == 511);
130 1.2 maxv ASSERT(nL4e == 1);
131 1.3 maxv if (!mm_pte_is_valid(L4_BASE[L4e_idx])) {
132 1.3 maxv pa = mm_palloc(1);
133 1.3 maxv L4_BASE[L4e_idx] = pa | PG_V | PG_RW;
134 1.3 maxv }
135 1.1 maxv
136 1.1 maxv /*
137 1.3 maxv * Build L3.
138 1.1 maxv */
139 1.3 maxv L3e_idx = pl3_i(startva);
140 1.3 maxv nL3e = roundup(size, NBPD_L3) / NBPD_L3;
141 1.3 maxv for (i = 0; i < nL3e; i++) {
142 1.3 maxv if (mm_pte_is_valid(L3_BASE[L3e_idx+i])) {
143 1.3 maxv continue;
144 1.3 maxv }
145 1.3 maxv pa = mm_palloc(1);
146 1.3 maxv L3_BASE[L3e_idx+i] = pa | PG_V | PG_RW;
147 1.3 maxv }
148 1.1 maxv
149 1.1 maxv /*
150 1.3 maxv * Build L2.
151 1.1 maxv */
152 1.3 maxv L2e_idx = pl2_i(startva);
153 1.3 maxv nL2e = roundup(size, NBPD_L2) / NBPD_L2;
154 1.2 maxv for (i = 0; i < nL2e; i++) {
155 1.3 maxv if (mm_pte_is_valid(L2_BASE[L2e_idx+i])) {
156 1.3 maxv continue;
157 1.3 maxv }
158 1.3 maxv pa = mm_palloc(1);
159 1.3 maxv L2_BASE[L2e_idx+i] = pa | PG_V | PG_RW;
160 1.1 maxv }
161 1.1 maxv }
162 1.1 maxv
163 1.1 maxv /*
164 1.1 maxv * Select a random VA, and create a page tree. The size of this tree is
165 1.1 maxv * actually hard-coded, and matches the one created by the generic NetBSD
166 1.1 maxv * locore.
167 1.1 maxv */
168 1.1 maxv static vaddr_t
169 1.1 maxv mm_rand_base()
170 1.1 maxv {
171 1.1 maxv vaddr_t randva;
172 1.1 maxv uint64_t rnd;
173 1.1 maxv size_t size;
174 1.1 maxv
175 1.1 maxv size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;
176 1.1 maxv
177 1.1 maxv /* yes, this is ridiculous */
178 1.1 maxv rnd = rdtsc();
179 1.1 maxv randva = rounddown(KASLR_WINDOW_BASE + rnd % (KASLR_WINDOW_SIZE - size),
180 1.1 maxv PAGE_SIZE);
181 1.1 maxv
182 1.2 maxv mm_map_tree(randva, randva + size);
183 1.1 maxv
184 1.1 maxv return randva;
185 1.1 maxv }
186 1.1 maxv
187 1.1 maxv /*
188 1.1 maxv * Virtual address space of the kernel:
189 1.1 maxv * +---------------+---------------------+------------------+-------------+
190 1.1 maxv * | KERNEL + SYMS | [PRELOADED MODULES] | BOOTSTRAP TABLES | ISA I/O MEM |
191 1.1 maxv * +---------------+---------------------+------------------+-------------+
192 1.1 maxv * We basically choose a random VA, and map everything contiguously starting
193 1.1 maxv * from there. Note that the physical pages allocated by mm_palloc are part
194 1.1 maxv * of the BOOTSTRAP TABLES.
195 1.1 maxv */
196 1.1 maxv vaddr_t
197 1.1 maxv mm_map_kernel()
198 1.1 maxv {
199 1.1 maxv size_t i, npages, size;
200 1.1 maxv vaddr_t baseva;
201 1.1 maxv
202 1.1 maxv size = (pa_avail - kernpa_start);
203 1.1 maxv baseva = mm_rand_base();
204 1.1 maxv npages = size / PAGE_SIZE;
205 1.1 maxv
206 1.1 maxv /* Enter the whole area linearly */
207 1.1 maxv for (i = 0; i < npages; i++) {
208 1.1 maxv mm_enter_pa(kernpa_start + i * PAGE_SIZE,
209 1.1 maxv baseva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
210 1.1 maxv }
211 1.1 maxv
212 1.1 maxv /* Enter the ISA I/O MEM */
213 1.1 maxv iom_base = baseva + npages * PAGE_SIZE;
214 1.1 maxv npages = IOM_SIZE / PAGE_SIZE;
215 1.1 maxv for (i = 0; i < npages; i++) {
216 1.1 maxv mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
217 1.1 maxv iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
218 1.1 maxv }
219 1.1 maxv
220 1.1 maxv return baseva;
221 1.1 maxv }
222