/*	$NetBSD: asan.h,v 1.11 2020/09/10 14:10:46 maxv Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

#define __MD_VIRTUAL_SHIFT	48	/* 49-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_KSEG_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
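
/*
 * A sketch of the shadow arithmetic, assuming the MI KASAN code defines
 * KASAN_SHADOW_SCALE_SHIFT as 3 (one shadow byte tracks eight bytes of
 * kernel memory):
 *
 *	shad(va) = KASAN_MD_SHADOW_START +
 *	    ((va - __MD_KERNMEM_BASE) >> 3)
 *
 * so the shadow region spans 2^(48-3) = 2^45 bytes (32TB), placed
 * directly after KSEG.
 */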

/* True until the pmap is fully bootstrapped, see kasan_md_early_init(). */
static bool __md_early __read_mostly = true;

/* Return the address of the shadow byte that tracks 'addr'. */
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}
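
/*
 * Worked example (hypothetical address, scale shift of 3 assumed): for
 * addr = __MD_KERNMEM_BASE + 0x1000, the shadow byte lives at
 * KASAN_MD_SHADOW_START + (0x1000 >> 3) = KASAN_MD_SHADOW_START + 0x200.
 */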

/* Only the regular kernel map is sanitized; the IO space is not. */
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_ADDRESS);
}

/* Allocate one zeroed physical page, via the boot allocator early on. */
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early))
		pa = (paddr_t)pmapboot_pagealloc();
	else
		pa = pmap_alloc_pdp(pmap_kernel(), NULL, 0, false);

	/* The page is zeroed. */
	return pa;
}

/* Try to allocate one L2_SIZE-aligned contiguous chunk; 0 on failure. */
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(L2_SIZE, 0, ~0UL, L2_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}
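
/*
 * Map the shadow page containing 'va', walking the page tables by hand
 * (L0 -> L1 -> L2 -> L3) and allocating intermediate levels as needed.
 * During early boot the tables are reached through KERN_PHYSTOV();
 * afterwards through the KSEG direct map (AARCH64_PA_TO_KVA()). When a
 * contiguous L2_SIZE chunk is available, a single L2 block mapping
 * replaces a full L3 table of individual pages.
 */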
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	l0pa = reg_ttbr1_el1_read();
	if (__predict_false(__md_early)) {
		l0 = (void *)KERN_PHYSTOV(l0pa);
	} else {
		l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
	}

	/* L0: allocate an L1 table if the slot is empty. */
	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l1 = (void *)KERN_PHYSTOV(pa);
	} else {
		l1 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	/* L1: allocate an L2 table if the slot is empty. */
	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l2 = (void *)KERN_PHYSTOV(pa);
	} else {
		l2 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		/* If possible, use an L2_BLOCK to map it in advance. */
		if ((pa = __md_palloc_large()) != 0) {
			atomic_swap_64(&l2[idx], pa | L2_BLOCK |
			    LX_BLKPAG_UXN | LX_BLKPAG_PXN | LX_BLKPAG_AF |
			    LX_BLKPAG_SH_IS | LX_BLKPAG_AP_RW);
			aarch64_tlbi_by_va(va);
			__builtin_memset((void *)va, 0, L2_SIZE);
			return;
		}
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else if (l2pde_is_block(pde)) {
		/* This VA is already mapped as a block. */
		return;
	} else {
		pa = l2pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l3 = (void *)KERN_PHYSTOV(pa);
	} else {
		l3 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	/* L3: install the final non-executable, read-write shadow page. */
	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
		    LX_BLKPAG_AP_RW);
		aarch64_tlbi_by_va(va);
	}
}
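
/*
 * A sketch of how the MI layer is expected to drive the function above
 * (the real loop lives in the MI KASAN code, not here): the shadow of
 * the requested range is mapped one page at a time, roughly
 *
 *	sva = trunc_page((vaddr_t)kasan_md_addr_to_shad(addr));
 *	eva = round_page((vaddr_t)kasan_md_addr_to_shad(addr + size));
 *	for (va = sva; va < eva; va += PAGE_SIZE)
 *		kasan_md_shadow_map_page(va);
 */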

/*
 * Map the shadow of the bootstrap stack (USPACE bytes), then leave
 * early mode so the regular pmap allocator and direct map get used.
 */
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

static void
kasan_md_init(void)
{
	vaddr_t eva, dummy;

	/* With 512GB covered per L0 slot, the 32TB shadow is 64 slots. */
	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	/* The VAs we've created until now. */
	pmap_virtual_space(&eva, &dummy);
	kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
	    eva - VM_MIN_KERNEL_ADDRESS);
}

/* The exception entry points mark the outermost kernel frames. */
static inline bool
__md_unwind_end(const char *name)
{
	if (!strncmp(name, "el0_trap", 8) ||
	    !strncmp(name, "el1_trap", 8)) {
		return true;
	}

	return false;
}
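
/*
 * A note on the stopping heuristics used below: the walk ends at an
 * exception entry point, at a saved LR outside the kernel map, at a
 * NULL frame pointer, or after 15 frames, so that a corrupted stack
 * cannot send the unwinder into the weeds.
 */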
static void
kasan_md_unwind(void)
{
	uint64_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * Normal stack frame:
		 *	fp[0]	saved fp (x29) value
		 *	fp[1]	saved lr (x30) value
		 */
		lr = fp[1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint64_t *)fp[0];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}