/*	$NetBSD: asan.h,v 1.5 2020/09/05 16:30:10 riastradh Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <arm/vmparam.h>
#include <arm/arm32/machdep.h>
#include <arm/arm32/pmap.h>

#define KASAN_MD_SHADOW_START	VM_KERNEL_KASAN_BASE
#define KASAN_MD_SHADOW_END	VM_KERNEL_KASAN_END
#define __MD_KERNMEM_BASE	KERNEL_BASE

static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}
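
/*
 * Worked example, with an illustrative (not authoritative) value: assuming
 * KASAN_SHADOW_SCALE_SHIFT == 3, i.e. one shadow byte per 8 bytes of
 * kernel memory,
 *
 *	kasan_md_addr_to_shad((void *)(__MD_KERNMEM_BASE + 0x1000))
 *	    == (int8_t *)(KASAN_MD_SHADOW_START + 0x200)
 */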

static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return addr < VM_MIN_KERNEL_ADDRESS ||
	    addr >= KASAN_MD_SHADOW_START;
}

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time.  We rely on the
 * fact that VA = PA + KERNEL_BASE.
 */

/*
 * KASAN_NEARLYPAGES is hard to work out.
 *
 * The INIT_ARM_TOTAL_STACK shadow is reduced by the KASAN_SHADOW_SCALE_SIZE
 * factor.  This shadow mapping is likely to span more than one L2 page
 * table and, as a result, more than one PAGE_SIZE block.  The L2 page
 * tables might span more than one L1 page table entry as well.
 *
 * To ensure we have enough, start with the assumption of one L1 page table
 * plus the number of pages needed to map the shadow, then double it to
 * cover the spanning described above.
 */

#define KASAN_NEARLYPAGES \
    (2 * (1 + howmany(INIT_ARM_TOTAL_STACK / KASAN_SHADOW_SCALE_SIZE, PAGE_SIZE)))
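
/*
 * Worked example with illustrative values (not authoritative for any
 * particular platform): if PAGE_SIZE == 4096, KASAN_SHADOW_SCALE_SIZE == 8,
 * and INIT_ARM_TOTAL_STACK == 32768, the shadow is 32768 / 8 == 4096 bytes,
 * i.e. 1 page, so KASAN_NEARLYPAGES == 2 * (1 + 1) == 4.
 */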

static bool __md_early __read_mostly;
static size_t __md_nearlyl1pts __attribute__((__section__(".data"))) = 0;
static size_t __md_nearlypages __attribute__((__section__(".data")));
static uint8_t __md_earlypages[KASAN_NEARLYPAGES * PAGE_SIZE]
    __aligned(PAGE_SIZE) __attribute__((__section__(".data")));

static vaddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		KASSERTMSG(__md_nearlypages < KASAN_NEARLYPAGES,
		    "__md_nearlypages %zu", __md_nearlypages);

		vaddr_t va = (vaddr_t)(&__md_earlypages[0] + __md_nearlypages * PAGE_SIZE);
		__md_nearlypages++;
		__builtin_memset((void *)va, 0, PAGE_SIZE);

		return KERN_VTOPHYS(va);
	}

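	/*
	 * Past the early window, but before uvm has finished setting up
	 * its page structures: steal a physical page directly.
	 */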
	if (!uvm.page_init_done) {
		if (uvm_page_physget(&pa) == false)
			panic("KASAN can't get a page");

		return pa;
	}

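	/* Normal case: allocate a page from uvm, waiting if none is free. */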
	struct vm_page *pg;
retry:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		uvm_wait(__func__);
		goto retry;
	}
	pa = VM_PAGE_TO_PHYS(pg);

	return pa;
}

static void
kasan_md_shadow_map_page(vaddr_t va)
{
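	/*
	 * TTBR1 holds the physical address of the kernel L1 translation
	 * table; mask off the low bits and convert it back to a VA so the
	 * table can be indexed directly.
	 */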
	const uint32_t mask = L1_TABLE_SIZE - 1;
	const paddr_t ttb = (paddr_t)(armreg_ttbr1_read() & ~mask);
	pd_entry_t * const pdep = (pd_entry_t *)KERN_PHYSTOV(ttb);

	const size_t l1slot = l1pte_index(va);
	vaddr_t l2ptva;

	KASSERT((va & PAGE_MASK) == 0);

	extern bool kasan_l2pts_created;
	if (__predict_true(kasan_l2pts_created)) {
		/*
		 * The shadow map area L2PTs were allocated and mapped
		 * by arm32_kernel_vm_init.  Use the array of pv_addr_t
		 * to get the l2ptva.
		 */
		extern pv_addr_t kasan_l2pt[];
		const size_t off = va - KASAN_MD_SHADOW_START;
		const size_t segoff = off & (L2_S_SEGSIZE - 1);
		const size_t idx = off / L2_S_SEGSIZE;
		const vaddr_t segl2ptva = kasan_l2pt[idx].pv_va;
		l2ptva = segl2ptva + l1pte_index(segoff) * L2_TABLE_SIZE_REAL;
	} else {
		/*
		 * An L1PT entry may be required for the bootstrap tables.
		 * Since one page provides room for multiple L2PTs, a
		 * previous call might already have created this L2PT.
		 */
		if (!l1pte_page_p(pdep[l1slot])) {
			const paddr_t l2ptpa = __md_palloc();
			const vaddr_t segl2va = va & -L2_S_SEGSIZE;
			const size_t segl1slot = l1pte_index(segl2va);

			__md_nearlyl1pts++;

			const pd_entry_t npde =
			    L1_C_PROTO | l2ptpa | L1_C_DOM(PMAP_DOMAIN_KERNEL);

			l1pte_set(pdep + segl1slot, npde);
			/*
			 * No need for PDE_SYNC_RANGE here as we're creating
			 * the bootstrap tables.
			 */
		}
		l2ptva = KERN_PHYSTOV(l1pte_pa(pdep[l1slot]));
	}

	pt_entry_t * l2pt = (pt_entry_t *)l2ptva;
	pt_entry_t * const ptep = &l2pt[l2pte_index(va)];

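	/*
	 * With the L2 table located, enter a PTE for the shadow page if
	 * one is not already present, and zero the freshly mapped page.
	 */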
	if (!l2pte_valid_p(*ptep)) {
		const int prot = VM_PROT_READ | VM_PROT_WRITE;
		const paddr_t pa = __md_palloc();
		pt_entry_t npte =
		    L2_S_PROTO |
		    pa |
		    (__md_early ? 0 : pte_l2_s_cache_mode_pt) |
		    L2_S_PROT(PTE_KERNEL, prot);
		l2pte_set(ptep, npte, 0);

		if (!__md_early)
			PTE_SYNC(ptep);

		__builtin_memset((void *)va, 0, PAGE_SIZE);
	}
}

/*
 * Map the init stacks of the BP and APs.  We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{

	/*
	 * We come through here twice.  The first time is for generic_start
	 * and the bootstrap tables.  The second is for arm32_kernel_vm_init
	 * and the real tables.
	 *
	 * In the first we have to create L1PT entries, whereas in the
	 * second arm32_kernel_vm_init has set up kasan_l1pts (and the L1PT
	 * entries for them).
	 */
	__md_early = true;
	__md_nearlypages = __md_nearlyl1pts;
	kasan_shadow_map(stack, INIT_ARM_TOTAL_STACK);
	__md_early = false;
}

static void
kasan_md_init(void)
{
	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* The VAs we've created until now. */
	vaddr_t eva;

	eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}


static inline bool
__md_unwind_end(const char *name)
{
	static const char * const vectors[] = {
		"undefined_entry",
		"swi_entry",
		"prefetch_abort_entry",
		"data_abort_entry",
		"address_exception_entry",
		"irq_entry",
		"fiqvector"
	};

	for (size_t i = 0; i < __arraycount(vectors); i++) {
		if (!strncmp(name, vectors[i], strlen(vectors[i])))
			return true;
	}

	return false;
}

static void
kasan_md_unwind(void)
{
	uint32_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint32_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal frame
		 *  fp[ 0] saved code pointer
		 *  fp[-1] saved lr value
		 *  fp[-2] saved sp value
		 *  fp[-3] saved fp value
		 */
		lr = fp[-1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint32_t *)fp[-3];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}