/* $NetBSD: asan.h,v 1.14 2020/11/10 07:51:19 skrll Exp $ */

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

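/*
 * Shadow map layout. Each shadow byte tracks one aligned granule of
 * kernel memory, 1 << KASAN_SHADOW_SCALE_SHIFT bytes wide (8 bytes with
 * the usual scale shift of 3 in the MI KASAN code). The kernel occupies
 * the upper half of a 49-bit VA space, hence the 48-bit virtual shift,
 * and the shadow region is placed directly after the direct map.
 */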
#define __MD_VIRTUAL_SHIFT	48	/* 49-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_DIRECTMAP_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

static bool __md_early __read_mostly = true;

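/*
 * Return the address of the shadow byte that tracks the given kernel
 * address. Illustrative example, assuming a scale shift of 3: the
 * granule at __MD_KERNMEM_BASE + 0x1000 is tracked by the shadow byte
 * at KASAN_MD_SHADOW_START + 0x200.
 */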
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

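/*
 * Addresses outside the regular kernel VA window (below
 * VM_MIN_KERNEL_ADDRESS, or in and above the I/O range) have no shadow
 * and are reported as unsupported so the callers skip them.
 */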
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_ADDRESS);
}

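/*
 * Allocate one physical page, for a page table or a shadow page. Three
 * boot phases are handled: pmapboot_pagealloc() while the bootstrap
 * page tables are still in use, uvm_pageboot_alloc() once the kernel is
 * running but before UVM has initialized its page array, and
 * uvm_pagealloc() afterwards (sleeping in uvm_wait() if memory is
 * momentarily unavailable).
 */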
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		pa = (paddr_t)pmapboot_pagealloc();
		return pa;
	}

	vaddr_t va;
	if (!uvm.page_init_done) {
		va = uvm_pageboot_alloc(PAGE_SIZE);
		pa = AARCH64_KVA_TO_PA(va);
	} else {
		struct vm_page *pg;
retry:
		pg = uvm_pagealloc(NULL, 0, NULL, 0);
		if (pg == NULL) {
			uvm_wait(__func__);
			goto retry;
		}

		pa = VM_PAGE_TO_PHYS(pg);
		va = AARCH64_PA_TO_KVA(pa);
	}

	__builtin_memset((void *)va, 0, PAGE_SIZE);
	return pa;
}

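/*
 * Try to allocate a physically contiguous, L2_SIZE-aligned chunk that
 * can back a whole L2 block mapping (2MB with 4KB base pages). Returns
 * 0 if UVM is not ready yet or no such chunk is available.
 */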
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(L2_SIZE, 0, ~0UL, L2_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

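/*
 * Map one shadow page at the given VA, walking the four translation
 * levels (L0 to L3) under TTBR1 and allocating any missing intermediate
 * page table along the way. While __md_early is set, table pages are
 * reached through KERN_PHYSTOV(); afterwards, through the direct map
 * via AARCH64_PA_TO_KVA(). When a contiguous L2-sized chunk can be
 * obtained, the shadow is mapped as one L2 block and the L3 level is
 * skipped entirely.
 */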
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	l0pa = reg_ttbr1_el1_read();
	if (__predict_false(__md_early)) {
		l0 = (void *)KERN_PHYSTOV(l0pa);
	} else {
		l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
	}

	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l1 = (void *)KERN_PHYSTOV(pa);
	} else {
		l1 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l2 = (void *)KERN_PHYSTOV(pa);
	} else {
		l2 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		/* If possible, use L2_BLOCK to map it in advance. */
		if ((pa = __md_palloc_large()) != 0) {
			atomic_swap_64(&l2[idx], pa | L2_BLOCK |
			    LX_BLKPAG_UXN | LX_BLKPAG_PXN | LX_BLKPAG_AF |
			    LX_BLKPAG_SH_IS | LX_BLKPAG_AP_RW);
			aarch64_tlbi_by_va(va);
			__builtin_memset((void *)va, 0, L2_SIZE);
			return;
		}
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else if (l2pde_is_block(pde)) {
		/* This VA is already mapped as a block. */
		return;
	} else {
		pa = l2pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l3 = (void *)KERN_PHYSTOV(pa);
	} else {
		l3 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
		    LX_BLKPAG_AP_RW);
		aarch64_tlbi_by_va(va);
	}
}

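/*
 * Create the shadow for the initial kernel stack, then leave the early
 * allocation mode so later shadow pages come from UVM.
 */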
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

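/*
 * Create the shadow for the kernel VAs established so far; the CTASSERT
 * checks at compile time that the shadow region spans exactly 64 L0
 * slots.
 */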
static void
kasan_md_init(void)
{

	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
	    eva - VM_MIN_KERNEL_ADDRESS);
}

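/*
 * Stop unwinding once a trap entry point is reached; there is no useful
 * frame beyond it.
 */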
static inline bool
__md_unwind_end(const char *name)
{
	if (!strncmp(name, "el0_trap", 8) ||
	    !strncmp(name, "el1_trap", 8)) {
		return true;
	}

	return false;
}

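/*
 * Print a best-effort backtrace by following the frame pointer chain,
 * resolving each saved lr through ksyms, for at most 15 frames.
 */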
static void
kasan_md_unwind(void)
{
	uint64_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * Normal stack frame:
		 *   fp[0]: saved fp (x29) value
		 *   fp[1]: saved lr (x30) value
		 */
		lr = fp[1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint64_t *)fp[0];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}