/* $NetBSD: asan.h,v 1.8 2020/09/05 16:30:10 riastradh Exp $ */

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

#define __MD_VIRTUAL_SHIFT	47	/* 48-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF800000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
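/*
 * Shadow arithmetic, assuming the MI 1:8 shadow scale
 * (KASAN_SHADOW_SCALE_SHIFT == 3): the 2^47-byte kernel half of the
 * address space needs a 2^44-byte shadow window, which the CTASSERT in
 * kasan_md_init() below checks is exactly NL4_SLOT_KASAN L4 slots.
 */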

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNBASE.
 */

static bool __md_early __read_mostly = true;
static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
static size_t __md_earlytaken = 0;

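/*
 * Hand out the next page of the static pool above, returning its
 * physical address by undoing the VA = PA + KERNBASE relation.
 */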
static paddr_t
__md_early_palloc(void)
{
	paddr_t ret;

	KASSERT(__md_earlytaken < 8);

	ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE);
	__md_earlytaken++;

	ret -= KERNBASE;

	return ret;
}

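/*
 * Enter a shadow page for va in the bootstrap page tables, allocating
 * missing intermediate levels from the early pool. Each level is reached
 * through its physical frame plus KERNBASE, because the recursive
 * mapping used by kasan_md_shadow_map_page() is not yet available.
 */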
static void
__md_early_shadow_map_page(vaddr_t va)
{
	extern struct bootspace bootspace;
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	pt_entry_t *pdir = (pt_entry_t *)bootspace.pdir;
	paddr_t pa;

	if (!pmap_valid_entry(pdir[pl4_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl4_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl4_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl3_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl3_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl3_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl2_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl2_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl2_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl1_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl1_pi(va)] = pa | pteflags | pmap_pg_g;
	}
}

/* -------------------------------------------------------------------------- */

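/*
 * Translate a kernel address into its shadow address: the offset from
 * the kernel memory base, scaled down by the shadow ratio and relocated
 * into the shadow window.
 */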
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

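/*
 * The recursive PTE area (one L4 slot starting at PTE_BASE) has no
 * shadow mapping; accesses to it are not monitored.
 */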
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

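/*
 * Try to allocate NBPD_L2 bytes of physically contiguous, NBPD_L2-aligned
 * memory, so the caller can map a whole L2 entry with one large page.
 * Returns 0 if uvm is not initialized yet or no such range is available.
 */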
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

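/*
 * Create a shadow page for va. Outside of early boot, the page tables
 * are reached through the recursive slots (L4_BASE and friends). If a
 * contiguous 2MB range is available, the L2 entry is mapped with a
 * single large page, zeroed by hand since __md_palloc_large() does not
 * guarantee zeroed memory.
 */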
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	if (__predict_false(__md_early)) {
		__md_early_shadow_map_page(va);
		return;
	}

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

/*
 * Map only the current stack. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

/*
 * Create the shadow mapping. We don't create the 'User' area, because we
 * exclude it from the monitoring. The 'Main' area is created dynamically
 * in pmap_growkernel.
 */
static void
kasan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kasan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kasan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

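/*
 * Return true if the symbol names one of the assembly entry points for
 * syscalls, traps and interrupts; past such a frame there is nothing
 * meaningful left to unwind.
 */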
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

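/*
 * Print a backtrace by following the chain of saved frame pointers and
 * resolving each return address with ksyms. Stop at the first address
 * below KERNBASE, at an unwind-terminating symbol, on a NULL frame
 * pointer, or after 15 frames.
 */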
static void
kasan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}