/*	$NetBSD: asan.h,v 1.4 2020/04/15 17:00:07 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

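/*
 * The shadow mirrors the upper (kernel) half of the 48-bit virtual address
 * space, starting at __MD_CANONICAL_BASE. Each shadow byte describes
 * 2^KASAN_SHADOW_SCALE_SHIFT bytes of kernel VA, and the shadow window
 * itself starts at L4 slot L4_SLOT_KASAN and spans NL4_SLOT_KASAN slots.
 */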
#define __MD_VIRTUAL_SHIFT	47	/* 48bit address space, cut in half */
#define __MD_CANONICAL_BASE	0xFFFF800000000000

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

static bool __md_early __read_mostly = true;
static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
static size_t __md_earlytaken = 0;

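/*
 * Return the address of the shadow byte that describes 'addr': take the
 * offset from the canonical kernel base, scale it down by
 * KASAN_SHADOW_SCALE_SHIFT, and relocate it into the shadow window.
 */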
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

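/*
 * The recursive page-table slot (PTE_BASE .. PTE_BASE + NBPD_L4) has no
 * shadow; report it as unsupported so accesses to it are not checked.
 */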
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

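/*
 * Early page allocator: hand out the next page from the static
 * __md_earlypages pool, used for the shadow page tables needed before UVM
 * is up. The address is converted from KVA to PA by subtracting KERNBASE.
 */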
static paddr_t
__md_early_palloc(void)
{
	paddr_t ret;

	KASSERT(__md_earlytaken < 8);

	ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE);
	__md_earlytaken++;

	ret -= KERNBASE;

	return ret;
}

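/*
 * Return the physical address of a zeroed page: from the early pool while
 * bootstrapping, from pmap_get_physpage() once the pmap is initialized.
 */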
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early))
		pa = __md_early_palloc();
	else
		pa = pmap_get_physpage();

	/* The page is zeroed. */
	return pa;
}

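/*
 * Try to allocate a physically contiguous, NBPD_L2-aligned chunk that can
 * back a 2MB large page. Return 0 if we are still early, UVM is not ready,
 * or the allocation fails; the caller is responsible for zeroing the memory.
 */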
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (__predict_false(__md_early))
		return 0;
	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

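/*
 * Map one shadow page at 'va', creating the missing L4/L3/L2/L1 entries on
 * the way down. When a contiguous chunk is available, map a whole 2MB large
 * page at the L2 level (and zero it, since it may not be zeroed) instead of
 * individual 4KB pages. If 'va' is already covered by a large page, there
 * is nothing to do.
 */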
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

/*
 * Map only the current stack. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

/*
 * Create the shadow mapping. We don't create the 'User' area, because we
 * exclude it from monitoring. The 'Main' area is created dynamically in
 * pmap_growkernel.
 */
static void
kasan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kasan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kasan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

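/*
 * Return true if 'name' is one of the assembly entry points (syscall, trap
 * and interrupt stubs) at which the C call-frame chain ends; the unwinder
 * stops there.
 */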
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

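/*
 * Print a backtrace of the current stack: follow the saved %rbp chain,
 * resolve each return address with ksyms_getname(), and stop at the first
 * non-kernel address, unresolvable symbol, assembly entry point, NULL frame
 * pointer, or after 15 frames.
 */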
static void
kasan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}