/*	$NetBSD: mm.c,v 1.2 2017/10/15 06:37:32 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "prekern.h"

/*
 * Translation of MM_PROT_* values into x86 PTE permission bits.
 * Executable mappings are read-only, and non-executable mappings carry
 * PG_NX — there is deliberately no RWX combination (W^X policy).
 */
static const pt_entry_t protection_codes[3] = {
	[MM_PROT_READ] = PG_RO | PG_NX,
	[MM_PROT_WRITE] = PG_RW | PG_NX,
	[MM_PROT_EXECUTE] = PG_RO,
	/* RWX does not exist */
};

/* Physical bounds of the loaded kernel image (defined in the locore). */
extern paddr_t kernpa_start, kernpa_end;
/* Base VA of the ISA I/O memory window; filled in by mm_map_kernel(). */
vaddr_t iom_base;

/* Bump allocator cursor: next free physical address (set by mm_init()). */
paddr_t pa_avail = 0;
/* Scratch VA just past the kernel image, used to zero freshly allocated
 * physical pages one at a time. */
static const vaddr_t tmpva = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);

/*
 * Initialize the physical page allocator: pages are handed out linearly
 * by mm_palloc(), starting at first_pa.
 */
void
mm_init(paddr_t first_pa)
{
	pa_avail = first_pa;
}

/*
 * Install a single valid (PG_V) L1 mapping of physical page pa at va,
 * with the permission bits selected by prot. The caller is responsible
 * for flushing the TLB entry (see mm_flush_va()).
 */
static void
mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
{
	PTE_BASE[pl1_i(va)] = pa | PG_V | protection_codes[prot];
}

/* Invalidate the TLB entry for va on the local CPU. */
static void
mm_flush_va(vaddr_t va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

64 1.2 maxv static paddr_t
65 1.2 maxv mm_palloc(size_t npages)
66 1.2 maxv {
67 1.2 maxv paddr_t pa;
68 1.2 maxv size_t i;
69 1.2 maxv
70 1.2 maxv /* Allocate the physical pages */
71 1.2 maxv pa = pa_avail;
72 1.2 maxv pa_avail += npages * PAGE_SIZE;
73 1.2 maxv
74 1.2 maxv /* Zero them out */
75 1.2 maxv for (i = 0; i < npages; i++) {
76 1.2 maxv mm_enter_pa(pa + i * PAGE_SIZE, tmpva,
77 1.2 maxv MM_PROT_READ|MM_PROT_WRITE);
78 1.2 maxv mm_flush_va(tmpva);
79 1.2 maxv memset((void *)tmpva, 0, PAGE_SIZE);
80 1.2 maxv }
81 1.2 maxv
82 1.2 maxv return pa;
83 1.2 maxv }
84 1.2 maxv
85 1.1 maxv paddr_t
86 1.1 maxv mm_vatopa(vaddr_t va)
87 1.1 maxv {
88 1.1 maxv return (PTE_BASE[pl1_i(va)] & PG_FRAME);
89 1.1 maxv }
90 1.1 maxv
91 1.1 maxv void
92 1.1 maxv mm_mprotect(vaddr_t startva, size_t size, int prot)
93 1.1 maxv {
94 1.1 maxv size_t i, npages;
95 1.1 maxv vaddr_t va;
96 1.1 maxv paddr_t pa;
97 1.1 maxv
98 1.1 maxv ASSERT(size % PAGE_SIZE == 0);
99 1.1 maxv npages = size / PAGE_SIZE;
100 1.1 maxv
101 1.1 maxv for (i = 0; i < npages; i++) {
102 1.1 maxv va = startva + i * PAGE_SIZE;
103 1.1 maxv pa = (PTE_BASE[pl1_i(va)] & PG_FRAME);
104 1.1 maxv mm_enter_pa(pa, va, prot);
105 1.1 maxv mm_flush_va(va);
106 1.1 maxv }
107 1.1 maxv }
108 1.1 maxv
/*
 * Allocate and wire up the intermediate page tables (L3, L2, L1) needed
 * to cover [startva, endva). The L1 entries themselves are left zeroed
 * (mm_palloc clears the pages); they get filled later via mm_enter_pa().
 * NOTE(review): the ordering below appears load-bearing — L3_BASE/L2_BASE
 * presumably go through the recursive mapping, so the L4 entry must be
 * written before the lower levels; do not reorder. TODO confirm.
 */
static void
mm_map_tree(vaddr_t startva, vaddr_t endva)
{
	size_t i, size, nL4e, nL3e, nL2e;
	size_t L4e_idx, L3e_idx, L2e_idx;
	paddr_t L3page_pa, L2page_pa, L1page_pa;

	/*
	 * Initialize constants: number of entries needed at each level to
	 * span the range, and the starting slot index at each level.
	 */
	size = endva - startva;
	nL4e = roundup(size, NBPD_L4) / NBPD_L4;
	nL3e = roundup(size, NBPD_L3) / NBPD_L3;
	nL2e = roundup(size, NBPD_L2) / NBPD_L2;
	L4e_idx = pl4_i(startva);
	L3e_idx = pl3_i(startva);
	L2e_idx = pl2_i(startva);

	/* The whole range must fit in a single L4 slot — the last one. */
	ASSERT(nL4e == 1);
	ASSERT(L4e_idx == 511);

	/*
	 * Allocate the physical pages: one page of L3 entries per L4 entry,
	 * one page of L2 entries per L3 entry, and so on.
	 */
	L3page_pa = mm_palloc(nL4e);
	L2page_pa = mm_palloc(nL3e);
	L1page_pa = mm_palloc(nL2e);

	/*
	 * Build the branch in the page tree. We link the levels together,
	 * from L4 to L1.
	 */
	L4_BASE[L4e_idx] = L3page_pa | PG_V | PG_RW;
	for (i = 0; i < nL3e; i++) {
		L3_BASE[L3e_idx+i] = (L2page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
	}
	for (i = 0; i < nL2e; i++) {
		L2_BASE[L2e_idx+i] = (L1page_pa + i * PAGE_SIZE) | PG_V | PG_RW;
	}
}

150 1.1 maxv /*
151 1.1 maxv * Select a random VA, and create a page tree. The size of this tree is
152 1.1 maxv * actually hard-coded, and matches the one created by the generic NetBSD
153 1.1 maxv * locore.
154 1.1 maxv */
155 1.1 maxv static vaddr_t
156 1.1 maxv mm_rand_base()
157 1.1 maxv {
158 1.1 maxv vaddr_t randva;
159 1.1 maxv uint64_t rnd;
160 1.1 maxv size_t size;
161 1.1 maxv
162 1.1 maxv size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;
163 1.1 maxv
164 1.1 maxv /* yes, this is ridiculous */
165 1.1 maxv rnd = rdtsc();
166 1.1 maxv randva = rounddown(KASLR_WINDOW_BASE + rnd % (KASLR_WINDOW_SIZE - size),
167 1.1 maxv PAGE_SIZE);
168 1.1 maxv
169 1.2 maxv mm_map_tree(randva, randva + size);
170 1.1 maxv
171 1.1 maxv return randva;
172 1.1 maxv }
173 1.1 maxv
174 1.1 maxv /*
175 1.1 maxv * Virtual address space of the kernel:
176 1.1 maxv * +---------------+---------------------+------------------+-------------+
177 1.1 maxv * | KERNEL + SYMS | [PRELOADED MODULES] | BOOTSTRAP TABLES | ISA I/O MEM |
178 1.1 maxv * +---------------+---------------------+------------------+-------------+
179 1.1 maxv * We basically choose a random VA, and map everything contiguously starting
180 1.1 maxv * from there. Note that the physical pages allocated by mm_palloc are part
181 1.1 maxv * of the BOOTSTRAP TABLES.
182 1.1 maxv */
183 1.1 maxv vaddr_t
184 1.1 maxv mm_map_kernel()
185 1.1 maxv {
186 1.1 maxv size_t i, npages, size;
187 1.1 maxv vaddr_t baseva;
188 1.1 maxv
189 1.1 maxv size = (pa_avail - kernpa_start);
190 1.1 maxv baseva = mm_rand_base();
191 1.1 maxv npages = size / PAGE_SIZE;
192 1.1 maxv
193 1.1 maxv /* Enter the whole area linearly */
194 1.1 maxv for (i = 0; i < npages; i++) {
195 1.1 maxv mm_enter_pa(kernpa_start + i * PAGE_SIZE,
196 1.1 maxv baseva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
197 1.1 maxv }
198 1.1 maxv
199 1.1 maxv /* Enter the ISA I/O MEM */
200 1.1 maxv iom_base = baseva + npages * PAGE_SIZE;
201 1.1 maxv npages = IOM_SIZE / PAGE_SIZE;
202 1.1 maxv for (i = 0; i < npages; i++) {
203 1.1 maxv mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
204 1.1 maxv iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
205 1.1 maxv }
206 1.1 maxv
207 1.1 maxv return baseva;
208 1.1 maxv }
209