subr_asan.c revision 1.8 1 /* $NetBSD: subr_asan.c,v 1.8 2019/05/04 10:07:10 maxv Exp $ */
2
3 /*
4 * Copyright (c) 2018 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: subr_asan.c,v 1.8 2019/05/04 10:07:10 maxv Exp $");
34
35 #include <sys/param.h>
36 #include <sys/device.h>
37 #include <sys/kernel.h>
38 #include <sys/param.h>
39 #include <sys/conf.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/asan.h>
43
44 #include <uvm/uvm.h>
45
/* ASAN constants. Part of the compiler ABI. */
/* One shadow byte encodes the state of 2^3 = 8 bytes of real memory. */
#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_SCALE_SIZE		(1UL << KASAN_SHADOW_SCALE_SHIFT)
/* Mask to extract an address's offset inside its 8-byte shadow group. */
#define KASAN_SHADOW_MASK		(KASAN_SHADOW_SCALE_SIZE - 1)

/* The MD code. Must come after the constants above, which it uses. */
#include <machine/asan.h>

/* ASAN ABI version. Selects the layout of struct __asan_global below. */
#if defined(__clang__) && (__clang_major__ - 0 >= 6)
#define ASAN_ABI_VERSION	8
#elif __GNUC_PREREQ__(7, 1) && !defined(__clang__)
#define ASAN_ABI_VERSION	8
#elif __GNUC_PREREQ__(6, 1) && !defined(__clang__)
#define ASAN_ABI_VERSION	6
#else
#error "Unsupported compiler version"
#endif

/* PC of our caller, used to attribute reports to the faulting code. */
#define __RET_ADDR	(unsigned long)__builtin_return_address(0)
66
/* Global variable descriptor. Part of the compiler ABI. */
struct __asan_global_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

struct __asan_global {
	const void *beg;		/* address of the global variable */
	size_t size;			/* size of the global variable */
	size_t size_with_redzone;	/* size with the redzone */
	const void *name;		/* name of the variable */
	const void *module_name;	/* name of the module where the var is declared */
	unsigned long has_dynamic_init;	/* the var has dyn initializer (c++) */
	struct __asan_global_source_location *location;
#if ASAN_ABI_VERSION >= 7
	uintptr_t odr_indicator;	/* the address of the ODR indicator symbol */
#endif
};

/* Set by kasan_init(); until then kasan_shadow_check() is a no-op. */
static bool kasan_enabled __read_mostly = false;
87
88 /* -------------------------------------------------------------------------- */
89
90 void
91 kasan_shadow_map(void *addr, size_t size)
92 {
93 size_t sz, npages, i;
94 vaddr_t sva, eva;
95
96 KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
97
98 sz = roundup(size, KASAN_SHADOW_SCALE_SIZE) / KASAN_SHADOW_SCALE_SIZE;
99
100 sva = (vaddr_t)kasan_md_addr_to_shad(addr);
101 eva = (vaddr_t)kasan_md_addr_to_shad(addr) + sz;
102
103 sva = rounddown(sva, PAGE_SIZE);
104 eva = roundup(eva, PAGE_SIZE);
105
106 npages = (eva - sva) / PAGE_SIZE;
107
108 KASSERT(sva >= KASAN_MD_SHADOW_START && eva < KASAN_MD_SHADOW_END);
109
110 for (i = 0; i < npages; i++) {
111 kasan_md_shadow_map_page(sva + i * PAGE_SIZE);
112 }
113 }
114
/*
 * Invoke the compiler-generated ASAN constructors collected between
 * __CTOR_LIST__ and __CTOR_END__ (they register the instrumented
 * globals via __asan_register_globals()).
 *
 * Fix: the original declared the table as uint64_t but stepped through
 * it with sizeof(uintptr_t). Constructor entries are function pointers,
 * i.e. native words, so on an ILP32 platform the two disagree (count
 * halved, stride doubled). Use uintptr_t consistently.
 */
static void
kasan_ctors(void)
{
	extern uintptr_t __CTOR_LIST__, __CTOR_END__;
	size_t nentries, i;
	uintptr_t *ptr;

	nentries = ((size_t)&__CTOR_END__ - (size_t)&__CTOR_LIST__) /
	    sizeof(uintptr_t);

	ptr = &__CTOR_LIST__;
	for (i = 0; i < nentries; i++) {
		void (*func)(void);

		/* Each entry is the address of a constructor. */
		func = (void *)(*ptr);
		(*func)();

		ptr++;
	}
}
135
/*
 * Early bootstrap entry point: hand the boot stack to the MD layer so a
 * minimal shadow exists before kasan_init() runs.
 */
void
kasan_early_init(void *stack)
{
	kasan_md_early_init(stack);
}
141
/*
 * Finish KASAN initialization. Order matters: the shadow must be fully
 * set up and checking enabled before the ASAN constructors run, since
 * they mark the redzones of instrumented globals.
 */
void
kasan_init(void)
{
	/* MD initialization. */
	kasan_md_init();

	/* Now officially enabled. */
	kasan_enabled = true;

	/* Call the ASAN constructors. */
	kasan_ctors();
}
154
155 static inline const char *
156 kasan_code_name(uint8_t code)
157 {
158 switch (code) {
159 case KASAN_GENERIC_REDZONE:
160 return "GenericRedZone";
161 case KASAN_MALLOC_REDZONE:
162 return "MallocRedZone";
163 case KASAN_KMEM_REDZONE:
164 return "KmemRedZone";
165 case KASAN_POOL_REDZONE:
166 return "PoolRedZone";
167 case KASAN_POOL_FREED:
168 return "PoolUseAfterFree";
169 case 1 ... 7:
170 return "RedZonePartial";
171 case KASAN_STACK_LEFT:
172 return "StackLeft";
173 case KASAN_STACK_RIGHT:
174 return "StackRight";
175 case KASAN_STACK_PARTIAL:
176 return "StackPartial";
177 case KASAN_USE_AFTER_SCOPE:
178 return "UseAfterScope";
179 default:
180 return "Unknown";
181 }
182 }
183
/*
 * Print a KASAN violation report: faulting PC, target address, access
 * size and direction, and the decoded shadow code, then let the MD
 * layer dump a stack trace.
 */
static void
kasan_report(unsigned long addr, size_t size, bool write, unsigned long pc,
    uint8_t code)
{
	printf("ASan: Unauthorized Access In %p: Addr %p [%zu byte%s, %s,"
	    " %s]\n",
	    (void *)pc, (void *)addr, size, (size > 1 ? "s" : ""),
	    (write ? "write" : "read"), kasan_code_name(code));
	kasan_md_unwind();
}
194
/*
 * Mark the single byte at 'addr' as valid. The shadow byte stores the
 * number of valid leading bytes in the 8-byte group (1..8, where 8 is
 * written as such rather than 0); growing it byte by byte from the
 * start of the group yields this "offset + 1" value.
 */
static __always_inline void
kasan_shadow_1byte_markvalid(unsigned long addr)
{
	int8_t *byte = kasan_md_addr_to_shad((void *)addr);
	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;

	*byte = last;
}
203
204 static __always_inline void
205 kasan_shadow_Nbyte_markvalid(const void *addr, size_t size)
206 {
207 size_t i;
208
209 for (i = 0; i < size; i++) {
210 kasan_shadow_1byte_markvalid((unsigned long)addr+i);
211 }
212 }
213
/*
 * Fill the shadow of [addr, addr + size) with 'code'. Both the address
 * and the size must be multiples of the shadow scale; unsupported
 * addresses and empty ranges are silently ignored.
 */
static __always_inline void
kasan_shadow_Nbyte_fill(const void *addr, size_t size, uint8_t code)
{
	void *shad;

	if (__predict_false(size == 0))
		return;
	if (__predict_false(kasan_md_unsupported((vaddr_t)addr)))
		return;

	KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
	KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);

	shad = (void *)kasan_md_addr_to_shad(addr);
	/* One shadow byte per 8 real bytes. */
	size = size >> KASAN_SHADOW_SCALE_SHIFT;

	__builtin_memset(shad, code, size);
}
232
233 void
234 kasan_add_redzone(size_t *size)
235 {
236 *size = roundup(*size, KASAN_SHADOW_SCALE_SIZE);
237 *size += KASAN_SHADOW_SCALE_SIZE;
238 }
239
240 static void
241 kasan_markmem(const void *addr, size_t size, bool valid, uint8_t code)
242 {
243 KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
244 if (valid) {
245 kasan_shadow_Nbyte_markvalid(addr, size);
246 } else {
247 kasan_shadow_Nbyte_fill(addr, size, code);
248 }
249 }
250
/*
 * Called when a soft interrupt borrows an lwp: re-mark the whole uarea
 * (kernel stack) as valid, since code 0 means "entire group
 * accessible". This wipes any stale stack poisoning left behind.
 */
void
kasan_softint(struct lwp *l)
{
	const void *stk = (const void *)uvm_lwp_getuarea(l);

	kasan_shadow_Nbyte_fill(stk, USPACE, 0);
}
258
259 /*
260 * In an area of size 'sz_with_redz', mark the 'size' first bytes as valid,
261 * and the rest as invalid. There are generally two use cases:
262 *
263 * o kasan_mark(addr, origsize, size, code), with origsize < size. This marks
264 * the redzone at the end of the buffer as invalid.
265 *
266 * o kasan_mark(addr, size, size, 0). This marks the entire buffer as valid.
267 */
/*
 * Mark the first 'size' bytes of a 'sz_with_redz'-sized area valid and
 * the remainder invalid with 'code'. The order below is load-bearing:
 * first poison the whole area, then un-poison the valid prefix.
 */
void
kasan_mark(const void *addr, size_t size, size_t sz_with_redz, uint8_t code)
{
	kasan_markmem(addr, sz_with_redz, false, code);
	kasan_markmem(addr, size, true, code);
}
274
275 /* -------------------------------------------------------------------------- */
276
/*
 * True if [addr, addr + size) straddles an 8-byte shadow group
 * boundary, i.e. its first and last byte map to different shadow bytes.
 *
 * Fix: parenthesize the macro arguments and the whole expansion. The
 * original expanded to `(a) != (b)` unparenthesized; since `!=` binds
 * loosely, a use such as `!ADDR_CROSSES_SCALE_BOUNDARY(p, 2)` would
 * silently parse as `(!(a)) != (b)`.
 */
#define ADDR_CROSSES_SCALE_BOUNDARY(addr, size) \
	(((addr) >> KASAN_SHADOW_SCALE_SHIFT) != \
	    (((addr) + (size) - 1) >> KASAN_SHADOW_SCALE_SHIFT))
280
/*
 * Check one byte. A shadow value of 0 means the whole group is valid;
 * otherwise the byte is valid iff its offset-in-group + 1 does not
 * exceed the stored count of valid leading bytes. On failure the
 * offending shadow code is returned through 'code'.
 */
static __always_inline bool
kasan_shadow_1byte_isvalid(unsigned long addr, uint8_t *code)
{
	int8_t *byte = kasan_md_addr_to_shad((void *)addr);
	int8_t last = (addr & KASAN_SHADOW_MASK) + 1;

	if (__predict_true(*byte == 0 || last <= *byte)) {
		return true;
	}
	*code = *byte;
	return false;
}
293
/*
 * Check a 2-byte access. If it crosses a shadow group boundary, fall
 * back to two 1-byte checks; otherwise one comparison against the
 * group's shadow byte covers both bytes (check the higher offset).
 */
static __always_inline bool
kasan_shadow_2byte_isvalid(unsigned long addr, uint8_t *code)
{
	int8_t *byte, last;

	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 2)) {
		return (kasan_shadow_1byte_isvalid(addr, code) &&
		    kasan_shadow_1byte_isvalid(addr+1, code));
	}

	byte = kasan_md_addr_to_shad((void *)addr);
	last = ((addr + 1) & KASAN_SHADOW_MASK) + 1;

	if (__predict_true(*byte == 0 || last <= *byte)) {
		return true;
	}
	*code = *byte;
	return false;
}
313
/*
 * Check a 4-byte access. Boundary-crossing accesses split into two
 * 2-byte checks; aligned-in-group accesses need only compare the
 * offset of the last byte against the group's shadow value.
 */
static __always_inline bool
kasan_shadow_4byte_isvalid(unsigned long addr, uint8_t *code)
{
	int8_t *byte, last;

	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 4)) {
		return (kasan_shadow_2byte_isvalid(addr, code) &&
		    kasan_shadow_2byte_isvalid(addr+2, code));
	}

	byte = kasan_md_addr_to_shad((void *)addr);
	last = ((addr + 3) & KASAN_SHADOW_MASK) + 1;

	if (__predict_true(*byte == 0 || last <= *byte)) {
		return true;
	}
	*code = *byte;
	return false;
}
333
/*
 * Check an 8-byte access. Boundary-crossing accesses split into two
 * 4-byte checks; a group-aligned access is fully covered by one shadow
 * byte comparison.
 */
static __always_inline bool
kasan_shadow_8byte_isvalid(unsigned long addr, uint8_t *code)
{
	int8_t *byte, last;

	if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 8)) {
		return (kasan_shadow_4byte_isvalid(addr, code) &&
		    kasan_shadow_4byte_isvalid(addr+4, code));
	}

	byte = kasan_md_addr_to_shad((void *)addr);
	last = ((addr + 7) & KASAN_SHADOW_MASK) + 1;

	if (__predict_true(*byte == 0 || last <= *byte)) {
		return true;
	}
	*code = *byte;
	return false;
}
353
/*
 * Check an arbitrarily-sized access one byte at a time; stop at the
 * first invalid byte (its shadow code is returned via 'code').
 */
static __always_inline bool
kasan_shadow_Nbyte_isvalid(unsigned long addr, size_t size, uint8_t *code)
{
	unsigned long end = addr + size;

	while (addr < end) {
		if (!kasan_shadow_1byte_isvalid(addr, code))
			return false;
		addr++;
	}
	return true;
}
366
/*
 * Core check: verify that [addr, addr + size) may be accessed, and
 * report a violation (blamed on 'retaddr') otherwise. No-op before
 * kasan_init(), for empty ranges, and for addresses the MD layer does
 * not shadow.
 */
static __always_inline void
kasan_shadow_check(unsigned long addr, size_t size, bool write,
    unsigned long retaddr)
{
	uint8_t code;
	bool valid;

	if (__predict_false(!kasan_enabled))
		return;
	if (__predict_false(size == 0))
		return;
	if (__predict_false(kasan_md_unsupported(addr)))
		return;

	/*
	 * When the size is a compile-time constant the fixed-size
	 * helpers inline to a few instructions; otherwise fall back to
	 * the byte-by-byte scan.
	 */
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			valid = kasan_shadow_1byte_isvalid(addr, &code);
			break;
		case 2:
			valid = kasan_shadow_2byte_isvalid(addr, &code);
			break;
		case 4:
			valid = kasan_shadow_4byte_isvalid(addr, &code);
			break;
		case 8:
			valid = kasan_shadow_8byte_isvalid(addr, &code);
			break;
		default:
			valid = kasan_shadow_Nbyte_isvalid(addr, size, &code);
			break;
		}
	} else {
		valid = kasan_shadow_Nbyte_isvalid(addr, size, &code);
	}

	if (__predict_false(!valid)) {
		kasan_report(addr, size, write, retaddr, code);
	}
}
407
408 /* -------------------------------------------------------------------------- */
409
/*
 * Instrumented memcpy: validate the source (read) and destination
 * (write) ranges before performing the copy.
 */
void *
kasan_memcpy(void *dst, const void *src, size_t len)
{
	kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR);
	kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR);
	return __builtin_memcpy(dst, src, len);
}
417
/*
 * Instrumented memcmp: both buffers are read-checked for the full
 * length before comparing.
 */
int
kasan_memcmp(const void *b1, const void *b2, size_t len)
{
	kasan_shadow_check((unsigned long)b1, len, false, __RET_ADDR);
	kasan_shadow_check((unsigned long)b2, len, false, __RET_ADDR);
	return __builtin_memcmp(b1, b2, len);
}
425
/*
 * Instrumented memset: write-check the target range before filling.
 */
void *
kasan_memset(void *b, int c, size_t len)
{
	kasan_shadow_check((unsigned long)b, len, true, __RET_ADDR);
	return __builtin_memset(b, c, len);
}
432
433 char *
434 kasan_strcpy(char *dst, const char *src)
435 {
436 char *save = dst;
437
438 while (1) {
439 kasan_shadow_check((unsigned long)src, 1, false, __RET_ADDR);
440 kasan_shadow_check((unsigned long)dst, 1, true, __RET_ADDR);
441 *dst = *src;
442 if (*src == '\0')
443 break;
444 src++, dst++;
445 }
446
447 return save;
448 }
449
450 int
451 kasan_strcmp(const char *s1, const char *s2)
452 {
453 while (1) {
454 kasan_shadow_check((unsigned long)s1, 1, false, __RET_ADDR);
455 kasan_shadow_check((unsigned long)s2, 1, false, __RET_ADDR);
456 if (*s1 != *s2)
457 break;
458 if (*s1 == '\0')
459 return 0;
460 s1++, s2++;
461 }
462
463 return (*(const unsigned char *)s1 - *(const unsigned char *)s2);
464 }
465
466 size_t
467 kasan_strlen(const char *str)
468 {
469 const char *s;
470
471 s = str;
472 while (1) {
473 kasan_shadow_check((unsigned long)s, 1, false, __RET_ADDR);
474 if (*s == '\0')
475 break;
476 s++;
477 }
478
479 return (s - str);
480 }
481
/*
 * Under KASAN the headers #define kcopy & co to their kasan_*
 * wrappers. Undo that here so the wrappers below can declare and call
 * the real kernel routines.
 */
#undef kcopy
#undef copystr
#undef copyinstr
#undef copyoutstr
#undef copyin

/* The instrumented wrappers (what instrumented callers actually call). */
int kasan_kcopy(const void *, void *, size_t);
int kasan_copystr(const void *, void *, size_t, size_t *);
int kasan_copyinstr(const void *, void *, size_t, size_t *);
int kasan_copyoutstr(const void *, void *, size_t, size_t *);
int kasan_copyin(const void *, void *, size_t);
/* The real, uninstrumented kernel routines. */
int kcopy(const void *, void *, size_t);
int copystr(const void *, void *, size_t, size_t *);
int copyinstr(const void *, void *, size_t, size_t *);
int copyoutstr(const void *, void *, size_t, size_t *);
int copyin(const void *, void *, size_t);
498
/*
 * kcopy() wrapper: both endpoints are kernel memory, so check the
 * source for read and the destination for write before delegating.
 */
int
kasan_kcopy(const void *src, void *dst, size_t len)
{
	kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR);
	kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR);
	return kcopy(src, dst, len);
}
506
/*
 * copystr() wrapper: write-check the kernel destination for the full
 * buffer length. The kernel source is NUL-terminated with unknown
 * length up front, so it is left to copystr() itself.
 */
int
kasan_copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{
	kasan_shadow_check((unsigned long)kdaddr, len, true, __RET_ADDR);
	return copystr(kfaddr, kdaddr, len, done);
}
513
/*
 * copyin() wrapper: only the kernel destination is shadow-checked;
 * the user-space source is outside the shadow.
 */
int
kasan_copyin(const void *uaddr, void *kaddr, size_t len)
{
	kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR);
	return copyin(uaddr, kaddr, len);
}
520
/*
 * copyinstr() wrapper: write-check the kernel destination buffer; the
 * user-space source is not shadowed.
 */
int
kasan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
	kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR);
	return copyinstr(uaddr, kaddr, len, done);
}
527
/*
 * copyoutstr() wrapper: read-check the kernel source buffer up to
 * 'len'; the user-space destination is not shadowed.
 */
int
kasan_copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{
	kasan_shadow_check((unsigned long)kaddr, len, false, __RET_ADDR);
	return copyoutstr(kaddr, uaddr, len, done);
}
534
535 /* -------------------------------------------------------------------------- */
536
537 void __asan_register_globals(struct __asan_global *, size_t);
538 void __asan_unregister_globals(struct __asan_global *, size_t);
539
540 void
541 __asan_register_globals(struct __asan_global *globals, size_t n)
542 {
543 size_t i;
544
545 for (i = 0; i < n; i++) {
546 kasan_mark(globals[i].beg, globals[i].size,
547 globals[i].size_with_redzone, KASAN_GENERIC_REDZONE);
548 }
549 }
550
/*
 * Compiler ABI entry point for module unload. The kernel never tears
 * these modules down, so this is intentionally a no-op.
 */
void
__asan_unregister_globals(struct __asan_global *globals, size_t n)
{
	/* never called */
}
556
/*
 * Generate the fixed-size __asan_load*/__asan_store* entry points the
 * compiler emits calls to around every instrumented memory access.
 * The *_noabort variants have identical behavior here (we report but
 * never abort).
 */
#define ASAN_LOAD_STORE(size) \
	void __asan_load##size(unsigned long); \
	void __asan_load##size(unsigned long addr) \
	{ \
		kasan_shadow_check(addr, size, false, __RET_ADDR);\
	} \
	void __asan_load##size##_noabort(unsigned long); \
	void __asan_load##size##_noabort(unsigned long addr) \
	{ \
		kasan_shadow_check(addr, size, false, __RET_ADDR);\
	} \
	void __asan_store##size(unsigned long); \
	void __asan_store##size(unsigned long addr) \
	{ \
		kasan_shadow_check(addr, size, true, __RET_ADDR);\
	} \
	void __asan_store##size##_noabort(unsigned long); \
	void __asan_store##size##_noabort(unsigned long addr) \
	{ \
		kasan_shadow_check(addr, size, true, __RET_ADDR);\
	}

ASAN_LOAD_STORE(1);
ASAN_LOAD_STORE(2);
ASAN_LOAD_STORE(4);
ASAN_LOAD_STORE(8);
ASAN_LOAD_STORE(16);
584
/* Variable-size compiler ABI entry points. */
void __asan_loadN(unsigned long, size_t);
void __asan_loadN_noabort(unsigned long, size_t);
void __asan_storeN(unsigned long, size_t);
void __asan_storeN_noabort(unsigned long, size_t);
void __asan_handle_no_return(void);

/* Read access of 'size' bytes at 'addr'. */
void
__asan_loadN(unsigned long addr, size_t size)
{
	kasan_shadow_check(addr, size, false, __RET_ADDR);
}

/* Same as __asan_loadN; we never abort. */
void
__asan_loadN_noabort(unsigned long addr, size_t size)
{
	kasan_shadow_check(addr, size, false, __RET_ADDR);
}

/* Write access of 'size' bytes at 'addr'. */
void
__asan_storeN(unsigned long addr, size_t size)
{
	kasan_shadow_check(addr, size, true, __RET_ADDR);
}

/* Same as __asan_storeN; we never abort. */
void
__asan_storeN_noabort(unsigned long addr, size_t size)
{
	kasan_shadow_check(addr, size, true, __RET_ADDR);
}

/*
 * Called before noreturn functions; userland ASAN unpoisons the stack
 * here, the kernel does not need to.
 */
void
__asan_handle_no_return(void)
{
	/* nothing */
}
620
/*
 * Generate the __asan_set_shadow_XX entry points: the compiler calls
 * them with a *shadow* address to fill 'size' shadow bytes with the
 * constant 0xXX (used for stack poisoning codes).
 */
#define ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(void *, size_t); \
	void __asan_set_shadow_##byte(void *addr, size_t size) \
	{ \
		__builtin_memset((void *)addr, 0x##byte, size); \
	}

ASAN_SET_SHADOW(00);
ASAN_SET_SHADOW(f1);
ASAN_SET_SHADOW(f2);
ASAN_SET_SHADOW(f3);
ASAN_SET_SHADOW(f5);
ASAN_SET_SHADOW(f8);
634
void __asan_poison_stack_memory(const void *, size_t);
void __asan_unpoison_stack_memory(const void *, size_t);

/*
 * Poison a stack range when a variable's scope ends. The size is
 * rounded up to the shadow scale, as kasan_shadow_Nbyte_fill()
 * requires.
 */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	size = roundup(size, KASAN_SHADOW_SCALE_SIZE);
	kasan_shadow_Nbyte_fill(addr, size, KASAN_USE_AFTER_SCOPE);
}

/*
 * Un-poison a stack range when the scope is re-entered. No rounding
 * here: markvalid works byte-by-byte.
 */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_shadow_Nbyte_markvalid(addr, size);
}
648