/*	$NetBSD: msan.h,v 1.6 2020/11/18 16:13:34 hannken Exp $	*/

/*
 * Copyright (c) 2019-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KMSAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KMSAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KMSAN"
#endif

/*
 * One big shadow, divided in two sub-shadows (SHAD and ORIG), themselves
 * divided in two regions (MAIN and KERN).
 */
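
/*
 * Resulting layout, as implied by the constants and translation functions
 * below (each region is one L4 slot, NBPD_L4 = 512GB, for a total of
 * __MD_SHADOW_SIZE):
 *
 *	__MD_SHADOW_START + 0x00000000000	SHAD MAIN (main kernel map)
 *	__MD_SHADOW_START + 0x08000000000	SHAD KERN (kernel image, >= KERNBASE)
 *	__MD_SHADOW_START + 0x10000000000	ORIG MAIN (main kernel map)
 *	__MD_SHADOW_START + 0x18000000000	ORIG KERN (kernel image, >= KERNBASE)
 *	__MD_SHADOW_START + 0x20000000000	__MD_SHADOW_END
 */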

#define __MD_SHADOW_SIZE	0x20000000000ULL /* 4 * NBPD_L4 */
#define __MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KMSAN * NBPD_L4)))
#define __MD_SHADOW_END		(__MD_SHADOW_START + __MD_SHADOW_SIZE)

#define __MD_SHAD_MAIN_START	(__MD_SHADOW_START)
#define __MD_SHAD_KERN_START	(__MD_SHADOW_START + 0x8000000000ULL)

#define __MD_ORIG_MAIN_START	(__MD_SHAD_KERN_START + 0x8000000000ULL)
#define __MD_ORIG_KERN_START	(__MD_ORIG_MAIN_START + 0x8000000000ULL)

#define __MD_PTR_BASE		0xFFFFFFFF80000000ULL
#define __MD_ORIG_TYPE		__BITS(31,28)

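/*
 * Return the SHAD byte corresponding to a kernel VA: addresses in the main
 * kernel map are offset from vm_min_kernel_address into the MAIN region,
 * addresses in the kernel image (>= KERNBASE) are offset from KERNBASE into
 * the KERN region. kmsan_md_addr_to_orig() below does the same for ORIG.
 */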
static inline int8_t *
kmsan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_SHAD_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_SHAD_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}

static inline int8_t *
kmsan_md_addr_to_orig(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_ORIG_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_ORIG_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}

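/*
 * Addresses for which KMSAN keeps no shadow; here, the recursive PTE space,
 * which spans one L4 slot starting at PTE_BASE.
 */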
static inline bool
kmsan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static inline paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	kmsan_init_arg(sizeof(psize_t) + 4 * sizeof(paddr_t) +
	    sizeof(struct pglist *) + 2 * sizeof(int));
	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

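/*
 * Map one page of shadow at 'va', walking the recursive page-table slots
 * from L4 down to L1 and allocating intermediate levels as needed. When a
 * contiguous 2MB chunk is available, install an L2 large page instead and
 * zero it by hand, since __md_palloc_large() may not return zeroed memory.
 */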
static void
kmsan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	KASSERT(va >= __MD_SHADOW_START && va < __MD_SHADOW_END);

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

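/*
 * Create the shadow of the VA ranges that already exist at boot time: the
 * kernel segments recorded in bootspace, the boot region, the module map,
 * and the bootstrap spare VA.
 */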
static void
kmsan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KMSAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kmsan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kmsan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kmsan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kmsan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

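/*
 * An origin is a 32-bit cookie: the low 32 bits of the originating pointer,
 * with the type packed into bits 28-31 (__MD_ORIG_TYPE). Decoding restores
 * an approximate pointer by clearing the type bits and ORing __MD_PTR_BASE
 * back in.
 */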
static inline msan_orig_t
kmsan_md_orig_encode(int type, uintptr_t ptr)
{
	msan_orig_t ret;

	ret = (ptr & 0xFFFFFFFF) & ~__MD_ORIG_TYPE;
	ret |= __SHIFTIN(type, __MD_ORIG_TYPE);

	return ret;
}

static inline void
kmsan_md_orig_decode(msan_orig_t orig, int *type, uintptr_t *ptr)
{
	*type = __SHIFTOUT(orig, __MD_ORIG_TYPE);
	*ptr = (uintptr_t)(orig & ~__MD_ORIG_TYPE) | __MD_PTR_BASE;
}

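/*
 * Decoded pointers below __rodata_start fall within the kernel text, so
 * treat them as program counters rather than data addresses.
 */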
static inline bool
kmsan_md_is_pc(uintptr_t ptr)
{
	extern uint8_t __rodata_start;

	return (ptr < (uintptr_t)&__rodata_start);
}

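/*
 * Symbol names at which the unwinder stops: syscall, trap and interrupt
 * entry points, below which there is no meaningful call frame to walk.
 */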
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

static void
kmsan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		kmsan_printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}