#ifndef __STDC_WANT_LIB_EXT1__
# define __STDC_WANT_LIB_EXT1__ 1
#endif
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif

#ifdef _WIN32
# include <windows.h>
# include <wincrypt.h>
#else
# include <unistd.h>
#endif

#ifndef HAVE_C_VARARRAYS
# ifdef HAVE_ALLOCA_H
#  include <alloca.h>
# elif !defined(alloca)
#  if defined(__GNUC__)
#   define alloca __builtin_alloca
#  elif defined _AIX
#   define alloca __alloca
#  elif defined _MSC_VER
#   include <malloc.h>
#   define alloca _alloca
#  else
#   include <stddef.h>
#   ifdef  __cplusplus
extern "C"
#   endif
void *alloca (size_t);
#  endif
# endif
#endif

#include "core.h"
#include "randombytes.h"
#include "utils.h"

#ifndef ENOSYS
# define ENOSYS ENXIO
#endif

#if defined(_WIN32) && \
    (!defined(WINAPI_FAMILY) || WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP)
# define WINAPI_DESKTOP
#endif

#define CANARY_SIZE 16U
#define GARBAGE_VALUE 0xdb

#ifndef MAP_NOCORE
# define MAP_NOCORE 0
#endif
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#endif
#if defined(WINAPI_DESKTOP) || (defined(MAP_ANON) && defined(HAVE_MMAP)) || \
    defined(HAVE_POSIX_MEMALIGN)
# define HAVE_ALIGNED_MALLOC
#endif
#if defined(HAVE_MPROTECT) && \
    !(defined(PROT_NONE) && defined(PROT_READ) && defined(PROT_WRITE))
# undef HAVE_MPROTECT
#endif
#if defined(HAVE_ALIGNED_MALLOC) && \
    (defined(WINAPI_DESKTOP) || defined(HAVE_MPROTECT))
# define HAVE_PAGE_PROTECTION
#endif
#if !defined(MADV_DODUMP) && defined(MADV_CORE)
# define MADV_DODUMP   MADV_CORE
# define MADV_DONTDUMP MADV_NOCORE
#endif

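/* page_size is filled in by _sodium_alloc_init() and only used by the
 * guarded allocator below; canary holds random bytes that are copied in
 * front of every region returned by sodium_malloc(). */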
static size_t        page_size;
static unsigned char canary[CANARY_SIZE];

/* LCOV_EXCL_START */
#ifdef HAVE_WEAK_SYMBOLS
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memzero_lto(void *const  pnt,
                                            const size_t len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memzero_lto(void *const  pnt,
                                            const size_t len)
{
    (void) pnt; /* LCOV_EXCL_LINE */
    (void) len; /* LCOV_EXCL_LINE */
}
#endif
/* LCOV_EXCL_STOP */

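/* Zero a region in a way the compiler is not allowed to optimize away.
 * A plain memset() of memory about to go out of scope can legally be
 * removed as a dead store; the branches below pick whichever barrier the
 * platform provides (SecureZeroMemory, memset_s, explicit_bzero, a call
 * through a weak symbol, or a volatile byte loop as the portable
 * fallback). */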
void
sodium_memzero(void *const pnt, const size_t len)
{
#ifdef _WIN32
    SecureZeroMemory(pnt, len);
#elif defined(HAVE_MEMSET_S)
    if (len > 0U && memset_s(pnt, (rsize_t) len, 0, (rsize_t) len) != 0) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
#elif defined(HAVE_EXPLICIT_BZERO)
    explicit_bzero(pnt, len);
#elif HAVE_WEAK_SYMBOLS
    memset(pnt, 0, len);
    _sodium_dummy_symbol_to_prevent_memzero_lto(pnt, len);
# ifdef HAVE_AMD64_ASM
    __asm__ __volatile__ ("" : : "p"(pnt));
# endif
#else
    volatile unsigned char *volatile pnt_ =
        (volatile unsigned char *volatile) pnt;
    size_t i = (size_t) 0U;

    while (i < len) {
        pnt_[i++] = 0U;
    }
#endif
}

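/* Best-effort wipe of recently used stack space by clearing a len-byte
 * scratch buffer allocated on the current frame (VLA or alloca()). If
 * neither is available this is a no-op. */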
void
sodium_stackzero(const size_t len)
{
#ifdef HAVE_C_VARARRAYS
    unsigned char fodder[len];
    sodium_memzero(fodder, len);
#elif HAVE_ALLOCA
    sodium_memzero(alloca(len), len);
#endif
}

#ifdef HAVE_WEAK_SYMBOLS
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memcmp_lto(const unsigned char *b1,
                                           const unsigned char *b2,
                                           const size_t         len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memcmp_lto(const unsigned char *b1,
                                           const unsigned char *b2,
                                           const size_t         len)
{
    (void) b1;
    (void) b2;
    (void) len;
}
#endif

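/* Constant-time comparison: the running time depends only on len, never on
 * where the first difference occurs. d accumulates the OR of all byte
 * XORs, so d == 0 iff the regions are equal; (1 & ((d - 1) >> 8)) - 1 then
 * maps d == 0 to 0 and any nonzero d to -1. Suitable for comparing secrets
 * such as authentication tags. */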
int
sodium_memcmp(const void *const b1_, const void *const b2_, size_t len)
{
#ifdef HAVE_WEAK_SYMBOLS
    const unsigned char *b1 = (const unsigned char *) b1_;
    const unsigned char *b2 = (const unsigned char *) b2_;
#else
    const volatile unsigned char *volatile b1 =
        (const volatile unsigned char *volatile) b1_;
    const volatile unsigned char *volatile b2 =
        (const volatile unsigned char *volatile) b2_;
#endif
    size_t                 i;
    volatile unsigned char d = 0U;

#if HAVE_WEAK_SYMBOLS
    _sodium_dummy_symbol_to_prevent_memcmp_lto(b1, b2, len);
#endif
    for (i = 0U; i < len; i++) {
        d |= b1[i] ^ b2[i];
    }
    return (1 & ((d - 1) >> 8)) - 1;
}

#ifdef HAVE_WEAK_SYMBOLS
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_compare_lto(const unsigned char *b1,
                                            const unsigned char *b2,
                                            const size_t         len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_compare_lto(const unsigned char *b1,
                                            const unsigned char *b2,
                                            const size_t         len)
{
    (void) b1;
    (void) b2;
    (void) len;
}
#endif

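/* Constant-time lexicographic comparison of two len-byte numbers encoded
 * in little-endian order (as used for nonces and counters). Returns -1 if
 * b1_ < b2_, 0 if they are equal and 1 if b1_ > b2_; all len bytes are
 * always scanned. */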
int
sodium_compare(const unsigned char *b1_, const unsigned char *b2_, size_t len)
{
#ifdef HAVE_WEAK_SYMBOLS
    const unsigned char *b1 = b1_;
    const unsigned char *b2 = b2_;
#else
    const volatile unsigned char *volatile b1 =
        (const volatile unsigned char *volatile) b1_;
    const volatile unsigned char *volatile b2 =
        (const volatile unsigned char *volatile) b2_;
#endif
    size_t                 i;
    volatile unsigned char gt = 0U;
    volatile unsigned char eq = 1U;
    uint16_t               x1, x2;

#if HAVE_WEAK_SYMBOLS
    _sodium_dummy_symbol_to_prevent_compare_lto(b1, b2, len);
#endif
    i = len;
    while (i != 0U) {
        i--;
        x1 = b1[i];
        x2 = b2[i];
        gt |= ((x2 - x1) >> 8) & eq;
        eq &= ((x2 ^ x1) - 1) >> 8;
    }
    return (int) (gt + gt + eq) - 1;
}

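/* Returns 1 if the nlen bytes at n are all zero, 0 otherwise, scanning the
 * whole buffer regardless of its content. */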
int
sodium_is_zero(const unsigned char *n, const size_t nlen)
{
    size_t                 i;
    volatile unsigned char d = 0U;

    for (i = 0U; i < nlen; i++) {
        d |= n[i];
    }
    return 1 & ((d - 1) >> 8);
}

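/* Increment a little-endian number in place, in time that depends only on
 * its length; typically used to step nonces and counters. The inline-asm
 * fast paths cover the common 8-, 12- and 24-byte nonce sizes. */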
void
sodium_increment(unsigned char *n, const size_t nlen)
{
    size_t        i = 0U;
    uint_fast16_t c = 1U;

#ifdef HAVE_AMD64_ASM
    uint64_t t64, t64_2;
    uint32_t t32;

    if (nlen == 12U) {
        __asm__ __volatile__(
            "xorq %[t64], %[t64] \n"
            "xorl %[t32], %[t32] \n"
            "stc \n"
            "adcq %[t64], (%[out]) \n"
            "adcl %[t32], 8(%[out]) \n"
            : [t64] "=&r"(t64), [t32] "=&r"(t32)
            : [out] "D"(n)
            : "memory", "flags", "cc");
        return;
    } else if (nlen == 24U) {
        __asm__ __volatile__(
            "movq $1, %[t64] \n"
            "xorq %[t64_2], %[t64_2] \n"
            "addq %[t64], (%[out]) \n"
            "adcq %[t64_2], 8(%[out]) \n"
            "adcq %[t64_2], 16(%[out]) \n"
            : [t64] "=&r"(t64), [t64_2] "=&r"(t64_2)
            : [out] "D"(n)
            : "memory", "flags", "cc");
        return;
    } else if (nlen == 8U) {
        __asm__ __volatile__("incq (%[out]) \n"
                             :
                             : [out] "D"(n)
                             : "memory", "flags", "cc");
        return;
    }
#endif
    for (; i < nlen; i++) {
        c += (uint_fast16_t) n[i];
        n[i] = (unsigned char) c;
        c >>= 8;
    }
}

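/* Add two little-endian numbers of the same length: a += b, carrying from
 * byte to byte, with the same 8/12/24-byte fast paths as
 * sodium_increment(). */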
void
sodium_add(unsigned char *a, const unsigned char *b, const size_t len)
{
    size_t        i = 0U;
    uint_fast16_t c = 0U;

#ifdef HAVE_AMD64_ASM
    uint64_t t64, t64_2, t64_3;
    uint32_t t32;

    if (len == 12U) {
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "movl 8(%[in]), %[t32] \n"
            "addq %[t64], (%[out]) \n"
            "adcl %[t32], 8(%[out]) \n"
            : [t64] "=&r"(t64), [t32] "=&r"(t32)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    } else if (len == 24U) {
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "movq 8(%[in]), %[t64_2] \n"
            "movq 16(%[in]), %[t64_3] \n"
            "addq %[t64], (%[out]) \n"
            "adcq %[t64_2], 8(%[out]) \n"
            "adcq %[t64_3], 16(%[out]) \n"
            : [t64] "=&r"(t64), [t64_2] "=&r"(t64_2), [t64_3] "=&r"(t64_3)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    } else if (len == 8U) {
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "addq %[t64], (%[out]) \n"
            : [t64] "=&r"(t64)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    }
#endif
    for (; i < len; i++) {
        c += (uint_fast16_t) a[i] + (uint_fast16_t) b[i];
        a[i] = (unsigned char) c;
        c >>= 8;
    }
}

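/* Called from the library initialization path: records the system page
 * size used by the guarded allocator and fills the allocation canary with
 * random bytes. */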
int
_sodium_alloc_init(void)
{
#ifdef HAVE_ALIGNED_MALLOC
# if defined(_SC_PAGESIZE)
    long page_size_ = sysconf(_SC_PAGESIZE);
    if (page_size_ > 0L) {
        page_size = (size_t) page_size_;
    }
# elif defined(WINAPI_DESKTOP)
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    page_size = (size_t) si.dwPageSize;
# endif
    if (page_size < CANARY_SIZE || page_size < sizeof(size_t)) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
#endif
    randombytes_buf(canary, sizeof canary);

    return 0;
}

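/* Lock pages into memory so that secrets are not written to swap and,
 * where supported, advise the kernel to exclude them from core dumps
 * (MADV_DONTDUMP / MADV_NOCORE). sodium_munlock() wipes the region before
 * unlocking it, since unlocked memory may be swapped out again. */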
int
sodium_mlock(void *const addr, const size_t len)
{
#if defined(MADV_DONTDUMP) && defined(HAVE_MADVISE)
    (void) madvise(addr, len, MADV_DONTDUMP);
#endif
#ifdef HAVE_MLOCK
    return mlock(addr, len);
#elif defined(WINAPI_DESKTOP)
    return -(VirtualLock(addr, len) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}

int
sodium_munlock(void *const addr, const size_t len)
{
    sodium_memzero(addr, len);
#if defined(MADV_DODUMP) && defined(HAVE_MADVISE)
    (void) madvise(addr, len, MADV_DODUMP);
#endif
#ifdef HAVE_MLOCK
    return munlock(addr, len);
#elif defined(WINAPI_DESKTOP)
    return -(VirtualUnlock(addr, len) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}

static int
_mprotect_noaccess(void *ptr, size_t size)
{
#ifdef HAVE_MPROTECT
    return mprotect(ptr, size, PROT_NONE);
#elif defined(WINAPI_DESKTOP)
    DWORD old;
    return -(VirtualProtect(ptr, size, PAGE_NOACCESS, &old) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}

static int
_mprotect_readonly(void *ptr, size_t size)
{
#ifdef HAVE_MPROTECT
    return mprotect(ptr, size, PROT_READ);
#elif defined(WINAPI_DESKTOP)
    DWORD old;
    return -(VirtualProtect(ptr, size, PAGE_READONLY, &old) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}

static int
_mprotect_readwrite(void *ptr, size_t size)
{
#ifdef HAVE_MPROTECT
    return mprotect(ptr, size, PROT_READ | PROT_WRITE);
#elif defined(WINAPI_DESKTOP)
    DWORD old;
    return -(VirtualProtect(ptr, size, PAGE_READWRITE, &old) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}

#ifdef HAVE_ALIGNED_MALLOC

__attribute__((noreturn)) static void
_out_of_bounds(void)
{
# ifdef SIGSEGV
    raise(SIGSEGV);
# elif defined(SIGKILL)
    raise(SIGKILL);
# endif
    abort(); /* not something we want any higher-level API to catch */
} /* LCOV_EXCL_LINE */

static inline size_t
_page_round(const size_t size)
{
    const size_t page_mask = page_size - 1U;

    return (size + page_mask) & ~page_mask;
}

static __attribute__((malloc)) unsigned char *
_alloc_aligned(const size_t size)
{
    void *ptr;

# if defined(MAP_ANON) && defined(HAVE_MMAP)
    if ((ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_PRIVATE | MAP_NOCORE, -1, 0)) ==
        MAP_FAILED) {
        ptr = NULL; /* LCOV_EXCL_LINE */
    }               /* LCOV_EXCL_LINE */
# elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&ptr, page_size, size) != 0) {
        ptr = NULL; /* LCOV_EXCL_LINE */
    }               /* LCOV_EXCL_LINE */
# elif defined(WINAPI_DESKTOP)
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
# else
#  error Bug
# endif
    return (unsigned char *) ptr;
}

static void
_free_aligned(unsigned char *const ptr, const size_t size)
{
# if defined(MAP_ANON) && defined(HAVE_MMAP)
    (void) munmap(ptr, size);
# elif defined(HAVE_POSIX_MEMALIGN)
    free(ptr);
# elif defined(WINAPI_DESKTOP)
    VirtualFree(ptr, 0U, MEM_RELEASE);
# else
#  error Bug
# endif
}

static unsigned char *
_unprotected_ptr_from_user_ptr(void *const ptr)
{
    uintptr_t      unprotected_ptr_u;
    unsigned char *canary_ptr;
    size_t         page_mask;

    canary_ptr = ((unsigned char *) ptr) - sizeof canary;
    page_mask = page_size - 1U;
    unprotected_ptr_u = ((uintptr_t) canary_ptr & (uintptr_t) ~page_mask);
    if (unprotected_ptr_u <= page_size * 2U) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
    return (unsigned char *) unprotected_ptr_u;
}

#endif /* HAVE_ALIGNED_MALLOC */

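/* Guarded allocator. When page protection is available, each allocation is
 * laid out as follows (addresses increasing left to right):
 *
 *   [ size page | guard page | ... canary | user data | guard page ]
 *     read-only   PROT_NONE     locked, read/write       PROT_NONE
 *
 * The first page stores the rounded-up region size, the user pointer is
 * pushed to the very end of the read/write region so that any overflow
 * immediately hits the trailing guard page, and a random canary placed
 * just before the user data is checked by sodium_free(). */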
#ifndef HAVE_ALIGNED_MALLOC
static __attribute__((malloc)) void *
_sodium_malloc(const size_t size)
{
    return malloc(size > (size_t) 0U ? size : (size_t) 1U);
}
#else
static __attribute__((malloc)) void *
_sodium_malloc(const size_t size)
{
    void          *user_ptr;
    unsigned char *base_ptr;
    unsigned char *canary_ptr;
    unsigned char *unprotected_ptr;
    size_t         size_with_canary;
    size_t         total_size;
    size_t         unprotected_size;

    if (size >= (size_t) SIZE_MAX - page_size * 4U) {
        errno = ENOMEM;
        return NULL;
    }
    if (page_size <= sizeof canary || page_size < sizeof unprotected_size) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
    size_with_canary = (sizeof canary) + size;
    unprotected_size = _page_round(size_with_canary);
    total_size       = page_size + page_size + unprotected_size + page_size;
    if ((base_ptr = _alloc_aligned(total_size)) == NULL) {
        return NULL; /* LCOV_EXCL_LINE */
    }
    unprotected_ptr = base_ptr + page_size * 2U;
    _mprotect_noaccess(base_ptr + page_size, page_size);
# ifndef HAVE_PAGE_PROTECTION
    memcpy(unprotected_ptr + unprotected_size, canary, sizeof canary);
# endif
    _mprotect_noaccess(unprotected_ptr + unprotected_size, page_size);
    sodium_mlock(unprotected_ptr, unprotected_size);
    canary_ptr =
        unprotected_ptr + _page_round(size_with_canary) - size_with_canary;
    user_ptr = canary_ptr + sizeof canary;
    memcpy(canary_ptr, canary, sizeof canary);
    memcpy(base_ptr, &unprotected_size, sizeof unprotected_size);
    _mprotect_readonly(base_ptr, page_size);
    assert(_unprotected_ptr_from_user_ptr(user_ptr) == unprotected_ptr);

    return user_ptr;
}
#endif /* !HAVE_ALIGNED_MALLOC */

__attribute__((malloc)) void *
sodium_malloc(const size_t size)
{
    void *ptr;

    if ((ptr = _sodium_malloc(size)) == NULL) {
        return NULL;
    }
    memset(ptr, (int) GARBAGE_VALUE, size);

    return ptr;
}

__attribute__((malloc)) void *
sodium_allocarray(size_t count, size_t size)
{
    size_t total_size;

    if (count > (size_t) 0U && size >= (size_t) SIZE_MAX / count) {
        errno = ENOMEM;
        return NULL;
    }
    total_size = count * size;

    return sodium_malloc(total_size);
}

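/* In the guarded version below, sodium_free() re-derives the region layout
 * from the user pointer, checks the canary (and the trailing canary when
 * guard pages are unavailable) and aborts via _out_of_bounds() if it has
 * been overwritten; otherwise the region is wiped, unlocked and
 * released. */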
#ifndef HAVE_ALIGNED_MALLOC
void
sodium_free(void *ptr)
{
    free(ptr);
}
#else
void
sodium_free(void *ptr)
{
    unsigned char *base_ptr;
    unsigned char *canary_ptr;
    unsigned char *unprotected_ptr;
    size_t         total_size;
    size_t         unprotected_size;

    if (ptr == NULL) {
        return;
    }
    canary_ptr      = ((unsigned char *) ptr) - sizeof canary;
    unprotected_ptr = _unprotected_ptr_from_user_ptr(ptr);
    base_ptr        = unprotected_ptr - page_size * 2U;
    memcpy(&unprotected_size, base_ptr, sizeof unprotected_size);
    total_size = page_size + page_size + unprotected_size + page_size;
    _mprotect_readwrite(base_ptr, total_size);
    if (sodium_memcmp(canary_ptr, canary, sizeof canary) != 0) {
        _out_of_bounds();
    }
# ifndef HAVE_PAGE_PROTECTION
    if (sodium_memcmp(unprotected_ptr + unprotected_size, canary,
                      sizeof canary) != 0) {
        _out_of_bounds();
    }
# endif
    sodium_munlock(unprotected_ptr, unprotected_size);
    _free_aligned(base_ptr, total_size);
}
#endif /* HAVE_ALIGNED_MALLOC */

#ifndef HAVE_PAGE_PROTECTION
static int
_sodium_mprotect(void *ptr, int (*cb)(void *ptr, size_t size))
{
    (void) ptr;
    (void) cb;
    errno = ENOSYS;
    return -1;
}
#else
static int
_sodium_mprotect(void *ptr, int (*cb)(void *ptr, size_t size))
{
    unsigned char *base_ptr;
    unsigned char *unprotected_ptr;
    size_t         unprotected_size;

    unprotected_ptr = _unprotected_ptr_from_user_ptr(ptr);
    base_ptr        = unprotected_ptr - page_size * 2U;
    memcpy(&unprotected_size, base_ptr, sizeof unprotected_size);

    return cb(unprotected_ptr, unprotected_size);
}
#endif

int
sodium_mprotect_noaccess(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_noaccess);
}

int
sodium_mprotect_readonly(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_readonly);
}

int
sodium_mprotect_readwrite(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_readwrite);
}

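/* ISO/IEC 7816-4 padding: append a 0x80 byte followed by zero bytes until
 * the total length is a multiple of blocksize, and report the padded
 * length. Every byte of the final block is touched through masks, so the
 * amount of work does not depend on the original length within that
 * block. */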
int
sodium_pad(size_t *padded_buflen_p, unsigned char *buf,
           size_t unpadded_buflen, size_t blocksize, size_t max_buflen)
{
    unsigned char          *tail;
    size_t                  i;
    size_t                  xpadlen;
    size_t                  xpadded_len;
    volatile unsigned char  mask;
    unsigned char           barrier_mask;

    if (blocksize <= 0U) {
        return -1;
    }
    xpadlen = blocksize - 1U;
    if ((blocksize & (blocksize - 1U)) == 0U) {
        xpadlen -= unpadded_buflen & (blocksize - 1U);
    } else {
        xpadlen -= unpadded_buflen % blocksize;
    }
    if ((size_t) SIZE_MAX - unpadded_buflen <= xpadlen) {
        sodium_misuse();
    }
    xpadded_len = unpadded_buflen + xpadlen;
    if (xpadded_len >= max_buflen) {
        return -1;
    }
    tail = &buf[xpadded_len];
    if (padded_buflen_p != NULL) {
        *padded_buflen_p = xpadded_len + 1U;
    }
    mask = 0U;
    for (i = 0; i < blocksize; i++) {
        /* barrier_mask is 0xff when i == xpadlen and 0 otherwise; shifting
         * the full size_t value down to its top byte keeps this correct
         * even for block sizes larger than 256 bytes. */
        barrier_mask = (unsigned char) (((i ^ xpadlen) - 1U)
                                        >> ((sizeof(size_t) - 1U) * CHAR_BIT));
        tail[-i] = (tail[-i] & mask) | (0x80 & barrier_mask);
        mask |= barrier_mask;
    }
    return 0;
}

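/* Remove ISO/IEC 7816-4 padding: scan the last block for the 0x80 barrier
 * without branching on the data, and return -1 if no valid padding is
 * found. */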
int
sodium_unpad(size_t *unpadded_buflen_p, const unsigned char *buf,
             size_t padded_buflen, size_t blocksize)
{
    const unsigned char *tail;
    unsigned char        acc = 0U;
    unsigned char        c;
    unsigned char        valid = 0U;
    volatile size_t      pad_len = 0U;
    size_t               i;
    size_t               is_barrier;

    if (padded_buflen < blocksize || blocksize <= 0U) {
        return -1;
    }
    tail = &buf[padded_buflen - 1U];

    for (i = 0U; i < blocksize; i++) {
        c = tail[-i];
        is_barrier =
            (( (acc - 1U) & (pad_len - 1U) & ((c ^ 0x80) - 1U) ) >> 8) & 1U;
        acc |= c;
        pad_len |= i & (1U + ~is_barrier);
        valid |= (unsigned char) is_barrier;
    }
    *unpadded_buflen_p = padded_buflen - 1U - pad_len;

    return (int) (valid - 1U);
}
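
/*
 * A minimal usage sketch, deliberately kept out of the build: it shows how
 * an application might combine the helpers above (guarded allocation plus
 * ISO/IEC 7816-4 padding). The example() name, the buffer sizes and the
 * reduced error handling are illustrative only and are not part of the
 * library.
 */
#if 0
# include <sodium.h>

static int
example(void)
{
    unsigned char *key;
    unsigned char  msg[64];
    size_t         padded_len;
    size_t         unpadded_len;

    if (sodium_init() < 0) {
        return -1;
    }
    /* Guarded, locked allocation: overflows hit a guard page, and
     * sodium_free() verifies the canary before releasing the region. */
    if ((key = (unsigned char *) sodium_malloc(32U)) == NULL) {
        return -1;
    }
    randombytes_buf(key, 32U);
    sodium_mprotect_noaccess(key);  /* unreadable while not in use */
    /* ... */
    sodium_mprotect_readwrite(key); /* accessible again when needed */
    sodium_free(key);

    /* Pad a 10-byte message up to a 16-byte boundary, then remove the
     * padding and check that the original length comes back. */
    randombytes_buf(msg, 10U);
    if (sodium_pad(&padded_len, msg, 10U, 16U, sizeof msg) != 0 ||
        sodium_unpad(&unpadded_len, msg, padded_len, 16U) != 0 ||
        unpadded_len != 10U) {
        return -1;
    }
    return 0;
}
#endif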