/*	$NetBSD: asan.h,v 1.10 2022/08/20 23:15:36 riastradh Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#include <x86/bootspace.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

#define __MD_VIRTUAL_SHIFT	47	/* 48-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF800000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
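
/*
 * Shadow layout: one shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT bytes of
 * kernel memory, so the shadow covers 1/(2^KASAN_SHADOW_SCALE_SHIFT) of the
 * 2^__MD_VIRTUAL_SHIFT-byte kernel half of the address space, i.e.
 * __MD_SHADOW_SIZE bytes. That is __MD_SHADOW_SIZE / NBPD_L4 ==
 * NL4_SLOT_KASAN consecutive L4 slots, starting at L4_SLOT_KASAN (see the
 * CTASSERT in kasan_md_init below).
 */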

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNBASE.
 */

static bool __md_early __read_mostly = true;
static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
static size_t __md_earlytaken = 0;

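/*
 * Hand out one page from the static early pool. The pool is part of the
 * kernel image, so its physical address is its virtual address minus
 * KERNBASE (see the comment above).
 */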
static paddr_t
__md_early_palloc(void)
{
	paddr_t ret;

	KASSERT(__md_earlytaken < 8);

	ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE);
	__md_earlytaken++;

	ret -= KERNBASE;

	return ret;
}

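/*
 * Walk the four paging levels by hand, starting from the bootstrap page
 * directory (bootspace.pdir), allocating any missing page-table page, and
 * finally the shadow page itself, from the early pool. Each level is
 * reached through VA = PA + KERNBASE rather than through the recursive
 * mapping used later.
 */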
static void
__md_early_shadow_map_page(vaddr_t va)
{
	extern struct bootspace bootspace;
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	pt_entry_t *pdir = (pt_entry_t *)bootspace.pdir;
	paddr_t pa;

	if (!pmap_valid_entry(pdir[pl4_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl4_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl4_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl3_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl3_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl3_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl2_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl2_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl2_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl1_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl1_pi(va)] = pa | pteflags | pmap_pg_g;
	}
}

/* -------------------------------------------------------------------------- */

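/*
 * Translate a kernel address into its shadow address: the offset from the
 * base of kernel memory, scaled down by 2^KASAN_SHADOW_SCALE_SHIFT and
 * rebased at KASAN_MD_SHADOW_START.
 */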
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

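/*
 * The recursive PTE area (one L4 slot at PTE_BASE) has no shadow; report it
 * as unsupported so the KASAN checks skip it.
 */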
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

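/*
 * Try to grab a physically contiguous, NBPD_L2-aligned chunk so the shadow
 * can be mapped with a single L2 (2MB) large page. Returns 0 if UVM is not
 * initialized yet or no such chunk is available, in which case the caller
 * falls back to 4KB pages.
 */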
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

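/*
 * Map one shadow page at 'va'. During early boot this defers to
 * __md_early_shadow_map_page(). Afterwards the page tables are walked
 * through the recursive slots (L4_BASE, L3_BASE, ...); if a whole L2 range
 * can be backed by a large page it is mapped with PTE_PS and zeroed by
 * hand, since __md_palloc_large() does not guarantee zeroed memory.
 */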
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	if (__predict_false(__md_early)) {
		__md_early_shadow_map_page(va);
		return;
	}

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

/*
 * Map only the current stack. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

/*
 * Create the shadow mapping. We don't create the 'User' area, because we
 * exclude it from monitoring. The 'Main' area is created dynamically in
 * pmap_growkernel.
 */
static void
kasan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kasan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kasan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

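/*
 * Return true if 'name' is the symbol of a kernel entry point (syscall,
 * trap or interrupt stub), i.e. a frame beyond which the unwinder should
 * not go.
 */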
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

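/*
 * Print a backtrace of the current stack by following the frame pointers,
 * stopping at the first non-kernel address, unresolvable symbol, kernel
 * entry point, NULL frame pointer, or after 15 frames.
 */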
static void
kasan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}