//===-- interception_win.cc -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Windows-specific interception methods.
//
// This file implements several hooking techniques used to intercept calls
// to functions. The hooks are installed dynamically by patching the machine
// code of the target function.
//
// The hooking techniques make assumptions about how the code was generated
// and are only safe under those assumptions.
//
// On 64-bit architectures there is no direct 64-bit jump instruction. To allow
// arbitrary branching over the whole address space, the notion of a trampoline
// region is used. A trampoline region is a memory region within a 2GB range of
// the code being patched where it is safe to emit custom assembly that builds
// 64-bit jumps.
//
// Hooking techniques
// ==================
//
// 1) Detour
//
//    The Detour hooking technique assumes the presence of a header with
//    padding and an overridable 2-byte nop instruction (mov edi, edi). The
//    nop instruction can safely be replaced by a 2-byte short jump without
//    any need to save the original instruction. A jump to the target is
//    encoded in the function header and the nop instruction is replaced by a
//    short jump to the header.
//
//        head:  5 x nop                 head:  jmp <hook>
//        func:  mov edi, edi    -->     func:  jmp short <head>
//               [...]                   real:  [...]
//
//    This technique is only implemented on 32-bit architectures.
//    Most of the time, Windows API functions are hookable with the detour
//    technique.
//
// 2) Redirect Jump
//
//    The redirect jump is applicable when the first instruction is a direct
//    jump. The instruction is replaced by a jump to the hook.
//
//        func:  jmp <label>     -->     func:  jmp <hook>
//
//    On a 64-bit architecture, a trampoline is inserted.
//
//        func:  jmp <label>     -->     func:  jmp <tramp>
//                                              [...]
//
//                                   [trampoline]
//                                      tramp:  jmp QWORD [addr]
//                                       addr:  .bytes <hook>
//
//    Note: <real> is equivalent to <label>.
//
// 3) HotPatch
//
//    The HotPatch hooking technique assumes the presence of a header with
//    padding and a first instruction that is at least 2 bytes long.
//
//    The 2-byte requirement exists to provide the minimal space needed to
//    encode a short jump. The HotPatch technique rewrites only one
//    instruction so that it cannot break a sequence of instructions that
//    contains a branch target.
//
//    These assumptions are enforced by the MSVC compiler when the /hotpatch
//    flag is used.
//      see: https://msdn.microsoft.com/en-us/library/ms173507.aspx
//    The default padding length is 5 bytes on 32-bit and 6 bytes on 64-bit.
//
//        head:   5 x nop                head:  jmp <hook>
//        func:   <instr>        -->     func:  jmp short <head>
//                [...]                  body:  [...]
//
//                                   [trampoline]
//                                       real:  <instr>
//                                              jmp <body>
//
//    On a 64-bit architecture:
//
//        head:   6 x nop                head:  jmp QWORD [addr1]
//        func:   <instr>        -->     func:  jmp short <head>
//                [...]                  body:  [...]
//
//                                   [trampoline]
//                                      addr1:  .bytes <hook>
//                                       real:  <instr>
//                                              jmp QWORD [addr2]
//                                      addr2:  .bytes <body>
//
// 4) Trampoline
//
//    The Trampoline hooking technique is the most aggressive one. It assumes
//    that there is a sequence of instructions at the start of the function
//    that can be safely replaced by a jump (enough room and no incoming
//    branches).
//
//    Unfortunately, these assumptions cannot be verified, and the patched
//    code may be broken after hooking.
//
//        func:   <instr>        -->     func:  jmp <hook>
//                <instr>
//                [...]                  body:  [...]
//
//                                   [trampoline]
//                                       real:  <instr>
//                                              <instr>
//                                              jmp <body>
//
//    On a 64-bit architecture:
//
//        func:   <instr>        -->     func:  jmp QWORD [addr1]
//                <instr>
//                [...]                  body:  [...]
//
//                                   [trampoline]
//                                      addr1:  .bytes <hook>
//                                       real:  <instr>
//                                              <instr>
//                                              jmp QWORD [addr2]
//                                      addr2:  .bytes <body>
//===----------------------------------------------------------------------===//

#include "interception.h"

#if SANITIZER_WINDOWS
#include "sanitizer_common/sanitizer_platform.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>

namespace __interception {

static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
static const int kJumpInstructionLength = 5;
static const int kShortJumpInstructionLength = 2;
static const int kIndirectJumpInstructionLength = 6;
static const int kBranchLength =
    FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength;

static void InterceptionFailed() {
  // Do we have a good way to abort with an error message here?
  __debugbreak();
}

static bool DistanceIsWithin2Gig(uptr from, uptr target) {
#if SANITIZER_WINDOWS64
  if (from < target)
    return target - from <= (uptr)0x7FFFFFFFU;
  else
    return from - target <= (uptr)0x80000000U;
#else
  // In a 32-bit address space, the address calculation will wrap, so this
  // check is unnecessary.
  return true;
#endif
}
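
// For example (illustrative addresses only): with from == 0x7FF612340000 and
// target == 0x7FF612348000 the distance is 0x8000 bytes, well within the
// +/- 2GB range a rel32 displacement can express, so DistanceIsWithin2Gig
// returns true; a target located 3GB away would make it return false.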
static uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

static uptr RoundUpTo(uptr size, uptr boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}
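
// For example, RoundUpTo(0x12345, 0x10000) == 0x20000. |boundary| is expected
// to be a power of two (the allocation granularity above is, in practice).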

// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.

static size_t _strlen(const char *str) {
  const char* p = str;
  while (*p != '\0') ++p;
  return p - str;
}

static char* _strchr(char* str, char c) {
  while (*str) {
    if (*str == c)
      return str;
    ++str;
  }
  return nullptr;
}

static void _memset(void *p, int value, size_t sz) {
  for (size_t i = 0; i < sz; ++i)
    ((char*)p)[i] = (char)value;
}

static void _memcpy(void *dst, void *src, size_t sz) {
  char *dst_c = (char*)dst,
       *src_c = (char*)src;
  for (size_t i = 0; i < sz; ++i)
    dst_c[i] = src_c[i];
}

static bool ChangeMemoryProtection(
    uptr address, uptr size, DWORD *old_protection) {
  return ::VirtualProtect((void*)address, size,
                          PAGE_EXECUTE_READWRITE,
                          old_protection) != FALSE;
}

static bool RestoreMemoryProtection(
    uptr address, uptr size, DWORD old_protection) {
  DWORD unused;
  return ::VirtualProtect((void*)address, size,
                          old_protection,
                          &unused) != FALSE;
}
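
// A minimal sketch (kept out of the build) of the protect/patch/restore
// pattern the two helpers above are designed for; the address and the patch
// byte are made up for illustration.
#if 0
static void ExamplePatchOneByte(uptr address) {
  DWORD protection = 0;
  if (!ChangeMemoryProtection(address, 1, &protection))
    InterceptionFailed();
  *(u8*)address = 0xCC;  // Write an int3 as a placeholder patch.
  if (!RestoreMemoryProtection(address, 1, protection))
    InterceptionFailed();
}
#endif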

static bool IsMemoryPadding(uptr address, uptr size) {
  u8* function = (u8*)address;
  for (size_t i = 0; i < size; ++i)
    if (function[i] != 0x90 && function[i] != 0xCC)
      return false;
  return true;
}

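// The 8-byte hint below is the canonical multi-byte NOP,
// 0F 1F 84 00 00 00 00 00 : nop DWORD PTR [rax + rax*1 + 0], which some
// compilers emit as padding in front of hot-patchable functions.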
static const u8 kHintNop8Bytes[] = {
  0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00
};

template<class T>
static bool FunctionHasPrefix(uptr address, const T &pattern) {
  u8* function = (u8*)address - sizeof(pattern);
  for (size_t i = 0; i < sizeof(pattern); ++i)
    if (function[i] != pattern[i])
      return false;
  return true;
}

static bool FunctionHasPadding(uptr address, uptr size) {
  if (IsMemoryPadding(address - size, size))
    return true;
  if (size <= sizeof(kHintNop8Bytes) &&
      FunctionHasPrefix(address, kHintNop8Bytes))
    return true;
  return false;
}

static void WritePadding(uptr from, uptr size) {
  _memset((void*)from, 0xCC, (size_t)size);
}

static void WriteJumpInstruction(uptr from, uptr target) {
  if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
    InterceptionFailed();
  ptrdiff_t offset = target - from - kJumpInstructionLength;
  *(u8*)from = 0xE9;
  *(u32*)(from + 1) = offset;
}
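
// For example (illustrative addresses only), WriteJumpInstruction(0x1000,
// 0x2000) emits E9 FB 0F 00 00, i.e. "jmp rel32" with
// rel32 = 0x2000 - 0x1000 - 5 = 0xFFB.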

static void WriteShortJumpInstruction(uptr from, uptr target) {
  sptr offset = target - from - kShortJumpInstructionLength;
  if (offset < -128 || offset > 127)
    InterceptionFailed();
  *(u8*)from = 0xEB;
  *(u8*)(from + 1) = (u8)offset;
}

#if SANITIZER_WINDOWS64
static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
  // jmp [rip + <offset>] = FF 25 <offset> where <offset> is a relative
  // offset.
  // The offset is the distance from the end of the jump instruction to the
  // memory location containing the targeted address. The displacement is still
  // 32-bit in x64, so indirect_target must be located within +/- 2GB range.
  int offset = indirect_target - from - kIndirectJumpInstructionLength;
  if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
                            indirect_target)) {
    InterceptionFailed();
  }
  *(u16*)from = 0x25FF;
  *(u32*)(from + 2) = offset;
}
#endif
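
// For example (illustrative addresses only), when the 8-byte address slot is
// placed immediately after the instruction, WriteIndirectJumpInstruction(
// 0x1000, 0x1006) emits FF 25 00 00 00 00, i.e. "jmp QWORD [rip + 0]".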

static void WriteBranch(
    uptr from, uptr indirect_target, uptr target) {
#if SANITIZER_WINDOWS64
  WriteIndirectJumpInstruction(from, indirect_target);
  *(u64*)indirect_target = target;
#else
  (void)indirect_target;
  WriteJumpInstruction(from, target);
#endif
}

static void WriteDirectBranch(uptr from, uptr target) {
#if SANITIZER_WINDOWS64
  // Emit an indirect jump through immediately following bytes:
  //   jmp [rip + kBranchLength]
  //   .quad <target>
  WriteBranch(from, from + kBranchLength, target);
#else
  WriteJumpInstruction(from, target);
#endif
}

struct TrampolineMemoryRegion {
  uptr content;
  uptr allocated_size;
  uptr max_size;
};

static const uptr kTrampolineScanLimitRange = 1 << 31;  // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];

static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {
#if SANITIZER_WINDOWS64
  uptr address = image_address;
  uptr scanned = 0;
  while (scanned < kTrampolineScanLimitRange) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return nullptr;

    // Check whether a region can be allocated at |address|.
    if (info.State == MEM_FREE && info.RegionSize >= granularity) {
      void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),
                                  granularity,
                                  MEM_RESERVE | MEM_COMMIT,
                                  PAGE_EXECUTE_READWRITE);
      return page;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
    scanned += info.RegionSize;
  }
  return nullptr;
#else
  return ::VirtualAlloc(nullptr,
                        granularity,
                        MEM_RESERVE | MEM_COMMIT,
                        PAGE_EXECUTE_READWRITE);
#endif
}

// Used by unittests to release mapped memory space.
void TestOnlyReleaseTrampolineRegions() {
  for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
    TrampolineMemoryRegion *current = &TrampolineRegions[bucket];
    if (current->content == 0)
      return;
    ::VirtualFree((void*)current->content, 0, MEM_RELEASE);
    current->content = 0;
  }
}

static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
  // Find a region within 2G with enough space to allocate |size| bytes.
  TrampolineMemoryRegion *region = nullptr;
  for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
    TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
    if (current->content == 0) {
      // No valid region found, allocate a new region.
      size_t bucket_size = GetMmapGranularity();
      void *content = AllocateTrampolineRegion(image_address, bucket_size);
      if (content == nullptr)
        return 0U;

      current->content = (uptr)content;
      current->allocated_size = 0;
      current->max_size = bucket_size;
      region = current;
      break;
    } else if (current->max_size - current->allocated_size > size) {
#if SANITIZER_WINDOWS64
      // On 64-bit, the memory must be allocated within a 2GB range of the
      // image address.
      uptr next_address = current->content + current->allocated_size;
      if (next_address < image_address ||
          next_address - image_address >= 0x7FFF0000)
        continue;
#endif
      // The space can be allocated in the current region.
      region = current;
      break;
    }
  }

  // Failed to find a region.
  if (region == nullptr)
    return 0U;

  // Allocate the space in the current region.
  uptr allocated_space = region->content + region->allocated_size;
  region->allocated_size += size;
  WritePadding(allocated_space, size);

  return allocated_space;
}
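
// For example, on x86-64 a caller that needs a direct branch requests
// kDirectBranchLength (6 + 8 == 14) bytes from AllocateMemoryForTrampoline;
// the returned slot is 0xCC-filled and lies within a 2GB range of
// |image_address| so that rel32 jumps between the image and the trampoline
// stay encodable.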

// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
  switch (*(u64*)address) {
    case 0x90909090909006EB:  // stub: jmp over 6 x nop.
      return 8;
  }

  switch (*(u8*)address) {
    case 0x90:  // 90 : nop
      return 1;

    case 0x50:  // push eax / rax
    case 0x51:  // push ecx / rcx
    case 0x52:  // push edx / rdx
    case 0x53:  // push ebx / rbx
    case 0x54:  // push esp / rsp
    case 0x55:  // push ebp / rbp
    case 0x56:  // push esi / rsi
    case 0x57:  // push edi / rdi
    case 0x5D:  // pop ebp / rbp
      return 1;

    case 0x6A:  // 6A XX = push XX
      return 2;

    case 0xb8:  // b8 XX XX XX XX : mov eax, XX XX XX XX
    case 0xB9:  // b9 XX XX XX XX : mov ecx, XX XX XX XX
      return 5;

    // Cannot overwrite control-instruction. Return 0 to indicate failure.
    case 0xE9:  // E9 XX XX XX XX : jmp <label>
    case 0xE8:  // E8 XX XX XX XX : call <func>
    case 0xC3:  // C3 : ret
    case 0xEB:  // EB XX : jmp XX (short jump)
    case 0x70:  // 7Y XX : jcc XX (short conditional jump)
    case 0x71:
    case 0x72:
    case 0x73:
    case 0x74:
    case 0x75:
    case 0x76:
    case 0x77:
    case 0x78:
    case 0x79:
    case 0x7A:
    case 0x7B:
    case 0x7C:
    case 0x7D:
    case 0x7E:
    case 0x7F:
      return 0;
  }

  switch (*(u16*)(address)) {
    case 0x018A:  // 8A 01 : mov al, byte ptr [ecx]
    case 0xFF8B:  // 8B FF : mov edi, edi
    case 0xEC8B:  // 8B EC : mov ebp, esp
    case 0xc889:  // 89 C8 : mov eax, ecx
    case 0xC18B:  // 8B C1 : mov eax, ecx
    case 0xC033:  // 33 C0 : xor eax, eax
    case 0xC933:  // 33 C9 : xor ecx, ecx
    case 0xD233:  // 33 D2 : xor edx, edx
      return 2;

    // Cannot overwrite control-instruction. Return 0 to indicate failure.
    case 0x25FF:  // FF 25 XX XX XX XX : jmp [XXXXXXXX]
      return 0;
  }

  switch (0x00FFFFFF & *(u32*)address) {
    case 0x24A48D:  // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
      return 7;
  }

#if SANITIZER_WINDOWS64
  switch (*(u8*)address) {
    case 0xA1:  // A1 XX XX XX XX XX XX XX XX :
                //   movabs eax, dword ptr ds:[XXXXXXXX]
      return 9;
  }

  switch (*(u16*)address) {
    case 0x5040:  // push rax
    case 0x5140:  // push rcx
    case 0x5240:  // push rdx
    case 0x5340:  // push rbx
    case 0x5440:  // push rsp
    case 0x5540:  // push rbp
    case 0x5640:  // push rsi
    case 0x5740:  // push rdi
    case 0x5441:  // push r12
    case 0x5541:  // push r13
    case 0x5641:  // push r14
    case 0x5741:  // push r15
    case 0x9066:  // Two-byte NOP
      return 2;

    case 0x058B:  // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
      if (rel_offset)
        *rel_offset = 2;
      return 6;
  }

  switch (0x00FFFFFF & *(u32*)address) {
    case 0xe58948:    // 48 89 e5 : mov rbp, rsp
    case 0xc18b48:    // 48 8b c1 : mov rax, rcx
    case 0xc48b48:    // 48 8b c4 : mov rax, rsp
    case 0xd9f748:    // 48 f7 d9 : neg rcx
    case 0xd12b48:    // 48 2b d1 : sub rdx, rcx
    case 0x07c1f6:    // f6 c1 07 : test cl, 0x7
    case 0xc98548:    // 48 85 C9 : test rcx, rcx
    case 0xc0854d:    // 4d 85 c0 : test r8, r8
    case 0xc2b60f:    // 0f b6 c2 : movzx eax, dl
    case 0xc03345:    // 45 33 c0 : xor r8d, r8d
    case 0xdb3345:    // 45 33 DB : xor r11d, r11d
    case 0xd98b4c:    // 4c 8b d9 : mov r11, rcx
    case 0xd28b4c:    // 4c 8b d2 : mov r10, rdx
    case 0xc98b4c:    // 4C 8B C9 : mov r9, rcx
    case 0xd2b60f:    // 0f b6 d2 : movzx edx, dl
    case 0xca2b48:    // 48 2b ca : sub rcx, rdx
    case 0x10b70f:    // 0f b7 10 : movzx edx, WORD PTR [rax]
    case 0xc00b4d:    // 4d 0b c0 : or r8, r8
    case 0xd18b48:    // 48 8b d1 : mov rdx, rcx
    case 0xdc8b4c:    // 4c 8b dc : mov r11, rsp
    case 0xd18b4c:    // 4c 8b d1 : mov r10, rcx
      return 3;

    case 0xec8348:    // 48 83 ec XX : sub rsp, XX
    case 0xf88349:    // 49 83 f8 XX : cmp r8, XX
    case 0x588948:    // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
      return 4;

    case 0xec8148:    // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
      return 7;

    case 0x058b48:    // 48 8b 05 XX XX XX XX :
                      //   mov rax, QWORD PTR [rip + XXXXXXXX]
    case 0x25ff48:    // 48 ff 25 XX XX XX XX :
                      //   rex.W jmp QWORD PTR [rip + XXXXXXXX]

      // Instructions having offset relative to 'rip' need offset adjustment.
      if (rel_offset)
        *rel_offset = 3;
      return 7;

    case 0x2444c7:    // C7 44 24 XX YY YY YY YY
                      //   mov dword ptr [rsp + XX], YYYYYYYY
      return 8;
  }

  switch (*(u32*)(address)) {
    case 0x24448b48:  // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
    case 0x246c8948:  // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
    case 0x245c8948:  // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
    case 0x24748948:  // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
    case 0x244C8948:  // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
      return 5;
    case 0x24648348:  // 48 83 64 24 XX : and QWORD PTR [rsp + XX], YY
      return 6;
  }

#else

  switch (*(u8*)address) {
    case 0xA1:  // A1 XX XX XX XX :  mov eax, dword ptr ds:[XXXXXXXX]
      return 5;
  }
  switch (*(u16*)address) {
    case 0x458B:  // 8B 45 XX : mov eax, dword ptr [ebp + XX]
    case 0x5D8B:  // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
    case 0x7D8B:  // 8B 7D XX : mov edi, dword ptr [ebp + XX]
    case 0xEC83:  // 83 EC XX : sub esp, XX
    case 0x75FF:  // FF 75 XX : push dword ptr [ebp + XX]
      return 3;
    case 0xC1F7:  // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX
    case 0x25FF:  // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]
      return 6;
    case 0x3D83:  // 83 3D XX YY ZZ WW TT : cmp dword ptr [WWZZYYXX], TT
      return 7;
    case 0x7D83:  // 83 7D XX YY : cmp dword ptr [ebp + XX], YY
      return 4;
  }

  switch (0x00FFFFFF & *(u32*)address) {
    case 0x24448A:  // 8A 44 24 XX : mov al, byte ptr [esp + XX]
    case 0x24448B:  // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
    case 0x244C8B:  // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
    case 0x24548B:  // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
    case 0x24748B:  // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
    case 0x247C8B:  // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
      return 4;
  }

  switch (*(u32*)address) {
    case 0x2444B60F:  // 0F B6 44 24 XX : movzx eax, byte ptr [esp + XX]
      return 5;
  }
#endif

  // Unknown instruction!
  // FIXME: Unknown instruction failures might happen when we add a new
  // interceptor or a new compiler version. In either case, they should result
  // in visible and readable error messages. However, merely calling abort()
  // leads to an infinite recursion in CheckFailed.
  InterceptionFailed();
  return 0;
}

// Returns 0 on error.
static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
  size_t cursor = 0;
  while (cursor < size) {
    size_t instruction_size = GetInstructionSize(address + cursor);
    if (!instruction_size)
      return 0;
    cursor += instruction_size;
  }
  return cursor;
}
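
// For example, for a 32-bit prologue "push ebp; mov ebp, esp; sub esp, XX"
// (1 + 2 + 3 bytes according to the table above), RoundUpToInstrBoundary(5,
// func) returns 6: patching exactly 5 bytes would split the third
// instruction, so the boundary is rounded up to the next instruction end.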

static bool CopyInstructions(uptr to, uptr from, size_t size) {
  size_t cursor = 0;
  while (cursor != size) {
    size_t rel_offset = 0;
    size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
    _memcpy((void*)(to + cursor), (void*)(from + cursor),
            (size_t)instruction_size);
    if (rel_offset) {
      uptr delta = to - from;
      uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
#if SANITIZER_WINDOWS64
      if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
        return false;
#endif
      *(u32*)(to + cursor + rel_offset) = relocated_offset;
    }
    cursor += instruction_size;
  }
  return true;
}
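
// Relocation example (illustrative addresses only): a 7-byte
// "mov rax, QWORD PTR [rip + 0x2000]" copied by CopyInstructions from 0x1000
// to 0x5000 gets its displacement rewritten to
// 0x2000 - (0x5000 - 0x1000) = -0x2000, so the copy still reads from
// 0x1000 + 7 + 0x2000 == 0x5000 + 7 - 0x2000 == 0x3007.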


#if !SANITIZER_WINDOWS64
bool OverrideFunctionWithDetour(
    uptr old_func, uptr new_func, uptr *orig_old_func) {
  const int kDetourHeaderLen = 5;
  const u16 kDetourInstruction = 0xFF8B;

  uptr header = (uptr)old_func - kDetourHeaderLen;
  uptr patch_length = kDetourHeaderLen + kShortJumpInstructionLength;

  // Validate that the function is hookable.
  if (*(u16*)old_func != kDetourInstruction ||
      !IsMemoryPadding(header, kDetourHeaderLen))
    return false;

  // Change memory protection to writable.
  DWORD protection = 0;
  if (!ChangeMemoryProtection(header, patch_length, &protection))
    return false;

  // Write a relative jump to the redirected function.
  WriteJumpInstruction(header, new_func);

  // Write the short jump to the function prefix.
  WriteShortJumpInstruction(old_func, header);

  // Restore previous memory protection.
  if (!RestoreMemoryProtection(header, patch_length, protection))
    return false;

  if (orig_old_func)
    *orig_old_func = old_func + kShortJumpInstructionLength;

  return true;
}
#endif

bool OverrideFunctionWithRedirectJump(
    uptr old_func, uptr new_func, uptr *orig_old_func) {
  // Check whether the first instruction is a relative jump.
  if (*(u8*)old_func != 0xE9)
    return false;

  if (orig_old_func) {
    uptr relative_offset = *(u32*)(old_func + 1);
    uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
    *orig_old_func = absolute_target;
  }

#if SANITIZER_WINDOWS64
  // If needed, get memory space for a trampoline jump.
  uptr trampoline = AllocateMemoryForTrampoline(old_func, kDirectBranchLength);
  if (!trampoline)
    return false;
  WriteDirectBranch(trampoline, new_func);
#endif

  // Change memory protection to writable.
  DWORD protection = 0;
  if (!ChangeMemoryProtection(old_func, kJumpInstructionLength, &protection))
    return false;

  // Write a relative jump to the redirected function.
  WriteJumpInstruction(old_func, FIRST_32_SECOND_64(new_func, trampoline));

  // Restore previous memory protection.
  if (!RestoreMemoryProtection(old_func, kJumpInstructionLength, protection))
    return false;

  return true;
}

bool OverrideFunctionWithHotPatch(
    uptr old_func, uptr new_func, uptr *orig_old_func) {
  const int kHotPatchHeaderLen = kBranchLength;

  uptr header = (uptr)old_func - kHotPatchHeaderLen;
  uptr patch_length = kHotPatchHeaderLen + kShortJumpInstructionLength;

  // Validate that the function is hot patchable.
  size_t instruction_size = GetInstructionSize(old_func);
  if (instruction_size < kShortJumpInstructionLength ||
      !FunctionHasPadding(old_func, kHotPatchHeaderLen))
    return false;

  if (orig_old_func) {
    // Put the needed instructions into the trampoline bytes.
    uptr trampoline_length = instruction_size + kDirectBranchLength;
    uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
    if (!trampoline)
      return false;
    if (!CopyInstructions(trampoline, old_func, instruction_size))
      return false;
    WriteDirectBranch(trampoline + instruction_size,
                      old_func + instruction_size);
    *orig_old_func = trampoline;
  }

  // If needed, get memory space for indirect address.
  uptr indirect_address = 0;
#if SANITIZER_WINDOWS64
  indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
  if (!indirect_address)
    return false;
#endif

  // Change memory protection to writable.
  DWORD protection = 0;
  if (!ChangeMemoryProtection(header, patch_length, &protection))
    return false;

  // Write jumps to the redirected function.
  WriteBranch(header, indirect_address, new_func);
  WriteShortJumpInstruction(old_func, header);

  // Restore previous memory protection.
  if (!RestoreMemoryProtection(header, patch_length, protection))
    return false;

  return true;
}

bool OverrideFunctionWithTrampoline(
    uptr old_func, uptr new_func, uptr *orig_old_func) {

  size_t instructions_length = kBranchLength;
  size_t padding_length = 0;
  uptr indirect_address = 0;

  if (orig_old_func) {
    // Find out the number of bytes of the instructions we need to copy
    // to the trampoline.
    instructions_length = RoundUpToInstrBoundary(kBranchLength, old_func);
    if (!instructions_length)
      return false;

    // Put the needed instructions into the trampoline bytes.
    uptr trampoline_length = instructions_length + kDirectBranchLength;
    uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
    if (!trampoline)
      return false;
    if (!CopyInstructions(trampoline, old_func, instructions_length))
      return false;
    WriteDirectBranch(trampoline + instructions_length,
                      old_func + instructions_length);
    *orig_old_func = trampoline;
  }

#if SANITIZER_WINDOWS64
  // Check if the targeted address can be encoded in the function padding.
  // Otherwise, allocate it in the trampoline region.
  if (IsMemoryPadding(old_func - kAddressLength, kAddressLength)) {
    indirect_address = old_func - kAddressLength;
    padding_length = kAddressLength;
  } else {
    indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
    if (!indirect_address)
      return false;
  }
#endif

  // Change memory protection to writable.
  uptr patch_address = old_func - padding_length;
  uptr patch_length = instructions_length + padding_length;
  DWORD protection = 0;
  if (!ChangeMemoryProtection(patch_address, patch_length, &protection))
    return false;

  // Patch the original function.
  WriteBranch(old_func, indirect_address, new_func);

  // Restore previous memory protection.
  if (!RestoreMemoryProtection(patch_address, patch_length, protection))
    return false;

  return true;
}

bool OverrideFunction(
    uptr old_func, uptr new_func, uptr *orig_old_func) {
#if !SANITIZER_WINDOWS64
  if (OverrideFunctionWithDetour(old_func, new_func, orig_old_func))
    return true;
#endif
  if (OverrideFunctionWithRedirectJump(old_func, new_func, orig_old_func))
    return true;
  if (OverrideFunctionWithHotPatch(old_func, new_func, orig_old_func))
    return true;
  if (OverrideFunctionWithTrampoline(old_func, new_func, orig_old_func))
    return true;
  return false;
}
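
// A minimal usage sketch of OverrideFunction (kept out of the build); the
// hooked API and the replacement below are hypothetical examples, not part
// of this file.
#if 0
static uptr real_HeapFree;

static BOOL WINAPI MyHeapFree(HANDLE heap, DWORD flags, LPVOID mem) {
  // ... record the free, then forward to the original implementation.
  typedef BOOL (WINAPI *HeapFree_t)(HANDLE, DWORD, LPVOID);
  return ((HeapFree_t)real_HeapFree)(heap, flags, mem);
}

static void InstallExampleHook() {
  if (!OverrideFunction((uptr)&HeapFree, (uptr)&MyHeapFree, &real_HeapFree))
    InterceptionFailed();
}
#endif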

static void **InterestingDLLsAvailable() {
  static const char *InterestingDLLs[] = {
      "kernel32.dll",
      "msvcr100.dll",      // VS2010
      "msvcr110.dll",      // VS2012
      "msvcr120.dll",      // VS2013
      "vcruntime140.dll",  // VS2015
      "ucrtbase.dll",      // Universal CRT
      // NTDLL should go last as it exports some functions that we should
      // override in the CRT [presumably only used internally].
      "ntdll.dll", NULL};
  static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
  if (!result[0]) {
    for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
      if (HMODULE h = GetModuleHandleA(InterestingDLLs[i]))
        result[j++] = (void *)h;
    }
  }
  return &result[0];
}

namespace {
// Utility for reading loaded PE images.
template <typename T> class RVAPtr {
 public:
  RVAPtr(void *module, uptr rva)
      : ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {}
  operator T *() { return ptr_; }
  T *operator->() { return ptr_; }
  T *operator++() { return ++ptr_; }

 private:
  T *ptr_;
};
} // namespace

// Internal implementation of GetProcAddress. At least since Windows 8,
// GetProcAddress appears to initialize DLLs before returning function pointers
// into them. This is problematic for the sanitizers, because they typically
// want to intercept malloc *before* MSVCRT initializes. Our internal
// implementation walks the export list manually without doing initialization.
uptr InternalGetProcAddress(void *module, const char *func_name) {
  // Check that the module header is full and present.
  RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
  RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
  if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
      headers->Signature != IMAGE_NT_SIGNATURE ||            // "PE\0\0"
      headers->FileHeader.SizeOfOptionalHeader <
          sizeof(IMAGE_OPTIONAL_HEADER)) {
    return 0;
  }

  IMAGE_DATA_DIRECTORY *export_directory =
      &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
  if (export_directory->Size == 0)
    return 0;
  RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
                                         export_directory->VirtualAddress);
  RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
  RVAPtr<DWORD> names(module, exports->AddressOfNames);
  RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals);

  for (DWORD i = 0; i < exports->NumberOfNames; i++) {
    RVAPtr<char> name(module, names[i]);
    if (!strcmp(func_name, name)) {
      DWORD index = ordinals[i];
      RVAPtr<char> func(module, functions[index]);

      // Handle forwarded functions.
      DWORD offset = functions[index];
      if (offset >= export_directory->VirtualAddress &&
          offset < export_directory->VirtualAddress + export_directory->Size) {
        // An entry for a forwarded function is a string with the following
        // format: "<module>.<function_name>" that is stored in the export
        // directory.
        char function_name[256];
        size_t function_name_length = _strlen(func);
        if (function_name_length >= sizeof(function_name) - 1)
          InterceptionFailed();

        _memcpy(function_name, func, function_name_length);
        function_name[function_name_length] = '\0';
        char* separator = _strchr(function_name, '.');
        if (!separator)
          InterceptionFailed();
        *separator = '\0';

        void* redirected_module = GetModuleHandleA(function_name);
        if (!redirected_module)
          InterceptionFailed();
        return InternalGetProcAddress(redirected_module, separator + 1);
      }

      return (uptr)(char *)func;
    }
  }

  return 0;
}
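
// A minimal usage sketch of InternalGetProcAddress (kept out of the build),
// assuming kernel32.dll is already loaded in the process:
#if 0
uptr heap_alloc = InternalGetProcAddress(
    (void *)GetModuleHandleA("kernel32.dll"), "HeapAlloc");
#endif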

bool OverrideFunction(
    const char *func_name, uptr new_func, uptr *orig_old_func) {
  bool hooked = false;
  void **DLLs = InterestingDLLsAvailable();
  for (size_t i = 0; DLLs[i]; ++i) {
    uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
    if (func_addr &&
        OverrideFunction(func_addr, new_func, orig_old_func)) {
      hooked = true;
    }
  }
  return hooked;
}

bool OverrideImportedFunction(const char *module_to_patch,
                              const char *imported_module,
                              const char *function_name, uptr new_function,
                              uptr *orig_old_func) {
  HMODULE module = GetModuleHandleA(module_to_patch);
  if (!module)
    return false;

  // Check that the module header is full and present.
  RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
  RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
  if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
      headers->Signature != IMAGE_NT_SIGNATURE ||            // "PE\0\0"
      headers->FileHeader.SizeOfOptionalHeader <
          sizeof(IMAGE_OPTIONAL_HEADER)) {
    return false;
  }

  IMAGE_DATA_DIRECTORY *import_directory =
      &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];

  // Iterate the list of imported DLLs. FirstThunk will be null for the last
  // entry.
  RVAPtr<IMAGE_IMPORT_DESCRIPTOR> imports(module,
                                          import_directory->VirtualAddress);
  for (; imports->FirstThunk != 0; ++imports) {
    RVAPtr<const char> modname(module, imports->Name);
    if (_stricmp(&*modname, imported_module) == 0)
      break;
  }
  if (imports->FirstThunk == 0)
    return false;

  // We have two parallel arrays: the import address table (IAT) and the table
  // of names. They start out containing the same data, but the loader rewrites
  // the IAT to hold imported addresses and leaves the name table in
  // OriginalFirstThunk alone.
  RVAPtr<IMAGE_THUNK_DATA> name_table(module, imports->OriginalFirstThunk);
  RVAPtr<IMAGE_THUNK_DATA> iat(module, imports->FirstThunk);
  for (; name_table->u1.Ordinal != 0; ++name_table, ++iat) {
    if (!IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
      RVAPtr<IMAGE_IMPORT_BY_NAME> import_by_name(
          module, name_table->u1.ForwarderString);
      const char *funcname = &import_by_name->Name[0];
      if (strcmp(funcname, function_name) == 0)
        break;
    }
  }
  if (name_table->u1.Ordinal == 0)
    return false;

  // Now we have the correct IAT entry. Do the swap. We have to make the page
  // read/write first.
  if (orig_old_func)
    *orig_old_func = iat->u1.AddressOfData;
  DWORD old_prot, unused_prot;
  if (!VirtualProtect(&iat->u1.AddressOfData, 4, PAGE_EXECUTE_READWRITE,
                      &old_prot))
    return false;
  iat->u1.AddressOfData = new_function;
  if (!VirtualProtect(&iat->u1.AddressOfData, 4, old_prot, &unused_prot))
    return false;  // Not clear if this failure bothers us.
  return true;
}
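
// A minimal usage sketch of OverrideImportedFunction (kept out of the build);
// the module name and the MyHeapAlloc replacement are hypothetical examples:
// redirect the HeapAlloc entry in foo.dll's import table for kernel32.dll,
// saving the original pointer.
#if 0
static void InstallExampleIatHook() {
  uptr real_heap_alloc = 0;
  OverrideImportedFunction("foo.dll", "kernel32.dll", "HeapAlloc",
                           (uptr)&MyHeapAlloc, &real_heap_alloc);
}
#endif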

}  // namespace __interception

#endif  // SANITIZER_WINDOWS