/* $NetBSD: asan.h,v 1.9 2020/08/01 06:35:00 maxv Exp $ */

/*
 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

#define __MD_VIRTUAL_SHIFT	48	/* 49bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kern mem base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_KSEG_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

static bool __md_early __read_mostly = true;

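/*
 * Return the shadow byte that tracks the given kernel address. Each shadow
 * byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of kernel memory, counted
 * from __MD_KERNMEM_BASE.
 */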
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
        vaddr_t va = (vaddr_t)addr;
        return (int8_t *)(KASAN_MD_SHADOW_START +
            ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

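/*
 * Only the regular kernel VA range is shadowed; addresses below it and the
 * kernel I/O range are not supported.
 */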
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
        return (addr < VM_MIN_KERNEL_ADDRESS) ||
            (addr >= VM_KERNEL_IO_ADDRESS);
}

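/*
 * Allocate one physical page for the shadow. Before the pmap is up, use
 * the bootstrap page allocator; afterwards, allocate through the kernel
 * pmap.
 */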
static paddr_t
__md_palloc(void)
{
        paddr_t pa;

        if (__predict_false(__md_early))
                pa = (paddr_t)pmapboot_pagealloc();
        else
                pa = pmap_alloc_pdp(pmap_kernel(), NULL, 0, false);

        /* The page is zeroed. */
        return pa;
}

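/*
 * Try to allocate a physically contiguous L2_SIZE chunk, suitable for an
 * L2 block mapping. Returns 0 if UVM is not initialized yet or if the
 * allocation fails. Unlike __md_palloc(), the memory may not be zeroed.
 */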
static inline paddr_t
__md_palloc_large(void)
{
        struct pglist pglist;
        int ret;

        if (!uvm.page_init_done)
                return 0;

        ret = uvm_pglistalloc(L2_SIZE, 0, ~0UL, L2_SIZE, 0,
            &pglist, 1, 0);
        if (ret != 0)
                return 0;

        /* The page may not be zeroed. */
        return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

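/*
 * Map one shadow page at 'va', creating any missing levels of the page
 * tables along the way. When a large contiguous chunk is available, the
 * whole L2 range is mapped at once with a block entry.
 */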
static void
kasan_md_shadow_map_page(vaddr_t va)
{
        pd_entry_t *l0, *l1, *l2, *l3;
        paddr_t l0pa, pa;
        pd_entry_t pde;
        size_t idx;

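        /* Locate the kernel's root (L0) page table, from TTBR1_EL1. */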
        l0pa = reg_ttbr1_el1_read();
        if (__predict_false(__md_early)) {
                l0 = (void *)KERN_PHYSTOV(l0pa);
        } else {
                l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
        }

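        /* L0: get the L1 table covering 'va', allocating it if missing. */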
        idx = l0pde_index(va);
        pde = l0[idx];
        if (!l0pde_valid(pde)) {
                pa = __md_palloc();
                atomic_swap_64(&l0[idx], pa | L0_TABLE);
        } else {
                pa = l0pde_pa(pde);
        }
        if (__predict_false(__md_early)) {
                l1 = (void *)KERN_PHYSTOV(pa);
        } else {
                l1 = (void *)AARCH64_PA_TO_KVA(pa);
        }

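        /* L1: get the L2 table, allocating it if missing. */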
        idx = l1pde_index(va);
        pde = l1[idx];
        if (!l1pde_valid(pde)) {
                pa = __md_palloc();
                atomic_swap_64(&l1[idx], pa | L1_TABLE);
        } else {
                pa = l1pde_pa(pde);
        }
        if (__predict_false(__md_early)) {
                l2 = (void *)KERN_PHYSTOV(pa);
        } else {
                l2 = (void *)AARCH64_PA_TO_KVA(pa);
        }

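        /*
         * L2: either map the whole L2_SIZE range at once with a block
         * entry, or get the L3 table, allocating it if missing.
         */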
        idx = l2pde_index(va);
        pde = l2[idx];
        if (!l2pde_valid(pde)) {
                /* If possible, use L2_BLOCK to map it in advance. */
                if ((pa = __md_palloc_large()) != 0) {
                        atomic_swap_64(&l2[idx], pa | L2_BLOCK |
                            LX_BLKPAG_UXN | LX_BLKPAG_PXN | LX_BLKPAG_AF |
                            LX_BLKPAG_SH_IS | LX_BLKPAG_AP_RW);
                        aarch64_tlbi_by_va(va);
                        __builtin_memset((void *)va, 0, L2_SIZE);
                        return;
                }
                pa = __md_palloc();
                atomic_swap_64(&l2[idx], pa | L2_TABLE);
        } else if (l2pde_is_block(pde)) {
                /* This VA is already mapped as a block. */
                return;
        } else {
                pa = l2pde_pa(pde);
        }
        if (__predict_false(__md_early)) {
                l3 = (void *)KERN_PHYSTOV(pa);
        } else {
                l3 = (void *)AARCH64_PA_TO_KVA(pa);
        }

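        /* L3: enter the shadow page itself, read/write, non-executable. */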
        idx = l3pte_index(va);
        pde = l3[idx];
        if (!l3pte_valid(pde)) {
                pa = __md_palloc();
                atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
                    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
                    LX_BLKPAG_AP_RW);
                aarch64_tlbi_by_va(va);
        }
}

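/*
 * Create the shadow for the initial stack, then leave early mode so that
 * later page allocations go through the pmap.
 */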
static void
kasan_md_early_init(void *stack)
{
        kasan_shadow_map(stack, USPACE);
        __md_early = false;
}

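/*
 * Create the shadow for the kernel VAs allocated so far, as reported by
 * pmap_virtual_space().
 */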
static void
kasan_md_init(void)
{
        vaddr_t eva, dummy;

        CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

        /* The VAs we've created until now. */
        pmap_virtual_space(&eva, &dummy);
        kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
            eva - VM_MIN_KERNEL_ADDRESS);
}

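/*
 * Stop unwinding when we reach the exception entry points.
 */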
static inline bool
__md_unwind_end(const char *name)
{
        if (!strncmp(name, "el0_trap", 8) ||
            !strncmp(name, "el1_trap", 8)) {
                return true;
        }

        return false;
}

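/*
 * Print a backtrace by walking the saved frame pointers, at most 15
 * frames, resolving the return addresses with ksyms.
 */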
static void
kasan_md_unwind(void)
{
        uint64_t lr, *fp;
        const char *mod;
        const char *sym;
        size_t nsym;
        int error;

        fp = (uint64_t *)__builtin_frame_address(0);
        nsym = 0;

        while (1) {
                /*
                 * normal stack frame
                 *  fp[0]  saved fp(x29) value
                 *  fp[1]  saved lr(x30) value
                 */
                lr = fp[1];

                if (lr < VM_MIN_KERNEL_ADDRESS) {
                        break;
                }
                error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
                if (error) {
                        break;
                }
                printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
                if (__md_unwind_end(sym)) {
                        break;
                }

                fp = (uint64_t *)fp[0];
                if (fp == NULL) {
                        break;
                }
                nsym++;

                if (nsym >= 15) {
                        break;
                }
        }
}