/* internal/refcount.h */
      1 /*
      2  * Copyright 2016-2019 The OpenSSL Project Authors. All Rights Reserved.
      3  *
      4  * Licensed under the OpenSSL license (the "License").  You may not use
      5  * this file except in compliance with the License.  You can obtain a copy
      6  * in the file LICENSE in the source distribution or at
      7  * https://www.openssl.org/source/license.html
      8  */
      9 #ifndef OSSL_INTERNAL_REFCOUNT_H
     10 # define OSSL_INTERNAL_REFCOUNT_H
     11 
/* Used to check reference counts, mostly while doing perl5 stuff :-) */
     13 # if defined(OPENSSL_NO_STDIO)
     14 #  if defined(REF_PRINT)
     15 #   error "REF_PRINT requires stdio"
     16 #  endif
     17 # endif
     18 
     19 # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
     20      && !defined(__STDC_NO_ATOMICS__) && !defined(__lint__)
     21 #  include <stdatomic.h>
     22 #  define HAVE_C11_ATOMICS
     23 # endif
     24 
# if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
     && ATOMIC_INT_LOCK_FREE > 0

#  define HAVE_ATOMICS 1

/* Lock-free C11 atomic int; manipulated via atomic_* operations below. */
typedef _Atomic int CRYPTO_REF_COUNT;
     31 
/*
 * Atomically bump the reference count, storing the post-increment value
 * in |*ret|.  A pure increment needs no ordering guarantees, so a relaxed
 * RMW suffices.  |lock| is unused here; it exists only so the signature
 * matches the lock-based fallback.  Always reports success (returns 1).
 */
static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret, void *lock)
{
    int oldcount = atomic_fetch_add_explicit(val, 1, memory_order_relaxed);

    *ret = oldcount + 1;
    return 1;
}
     37 
/*
 * Atomically drop the reference count, storing the post-decrement value
 * in |*ret|.  Changes to the shared structure other than the counter must
 * already be serialized, and any such serialization implies a release
 * fence, so by the time the counter is decremented every other change is
 * visible on all processors; the decrement itself may therefore be
 * relaxed.  When the count reaches zero the caller will destruct the
 * object, and the destructor may then access mutable members without
 * further serialization, which would otherwise require acquire ordering;
 * hence the conditional acquire fence on the zero path.  Always reports
 * success (returns 1).  |lock| is unused in this configuration.
 */
static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret, void *lock)
{
    int newcount = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;

    *ret = newcount;
    if (newcount == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}
     55 
# elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#  define HAVE_ATOMICS 1

/* Plain int, manipulated exclusively through GCC __atomic built-ins. */
typedef int CRYPTO_REF_COUNT;
     61 
/*
 * Increment the reference count with a relaxed GCC atomic built-in and
 * store the new value in |*ret|; no ordering is required for a pure
 * increment.  |lock| is ignored (present only for signature compatibility
 * with the locking fallback).  Returns 1 unconditionally.
 */
static __inline__ int CRYPTO_UP_REF(int *val, int *ret, void *lock)
{
    int updated = __atomic_add_fetch(val, 1, __ATOMIC_RELAXED);

    *ret = updated;
    return 1;
}
     67 
/*
 * Decrement the reference count with a relaxed built-in and store the new
 * value in |*ret|.  Prior serialization of other mutations already implies
 * release ordering, so the decrement itself may be relaxed; when the count
 * drops to zero an acquire fence is issued so the destructor may access
 * mutable members without further synchronization.  Returns 1
 * unconditionally; |lock| is ignored in this configuration.
 */
static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret, void *lock)
{
    int remaining = __atomic_sub_fetch(val, 1, __ATOMIC_RELAXED);

    *ret = remaining;
    if (remaining == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}
     75 
# elif defined(_MSC_VER) && _MSC_VER>=1200

#  define HAVE_ATOMICS 1

/* Plain volatile int; all accesses go through Interlocked* intrinsics. */
typedef volatile int CRYPTO_REF_COUNT;

/* ARMv7+/ARM64 (not WinCE): relaxed "_nf" intrinsics plus explicit DMB. */
#  if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#   include <intrin.h>
#   if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#    define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#   endif
     87 
/*
 * Relaxed ("no fence") atomic increment; |*ret| receives the new count.
 * |lock| is unused, kept only for signature uniformity with the locking
 * fallback.  Always reports success (returns 1).
 */
static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
    return 1;
}
     93 
/*
 * Relaxed atomic decrement; |*ret| receives the new count.  When the
 * count drops to zero, an inner-shareable data memory barrier is issued
 * so the destructor may access the object without further serialization
 * (mirroring the conditional acquire fence in the C11 implementation
 * above).  Always reports success (returns 1).
 */
static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}
#  else
   /* Non-ARM targets (and WinCE): use plain _InterlockedExchangeAdd. */
#   if !defined(_WIN32_WCE)
#    pragma intrinsic(_InterlockedExchangeAdd)
#   else
#    if _WIN32_WCE >= 0x600
      extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#    else
      /* under Windows CE we still have old-style Interlocked* functions */
      extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#     define _InterlockedExchangeAdd InterlockedExchangeAdd
#    endif
#   endif
    113 
/*
 * Atomic increment via the _InterlockedExchangeAdd intrinsic; |*ret|
 * receives the new count.  |lock| is unused (kept for signature
 * uniformity with the locking fallback).  Always reports success.
 */
static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd(val, 1) + 1;
    return 1;
}
    119 
/*
 * Atomic decrement via _InterlockedExchangeAdd; |*ret| receives the new
 * count.  NOTE(review): no explicit fence on the zero path here,
 * presumably because the full Interlocked intrinsic already provides
 * full-barrier semantics on these targets -- confirm against the MSVC
 * intrinsic documentation.  Always reports success (returns 1).
 */
static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd(val, -1) - 1;
    return 1;
}
    125 #  endif
    126 
# else

/*
 * No compiler-level atomics available: fall back to the CRYPTO_atomic_add()
 * API, which serializes the adjustment using the caller-supplied |lock|.
 */
typedef int CRYPTO_REF_COUNT;

# define CRYPTO_UP_REF(val, ret, lock) CRYPTO_atomic_add(val, 1, ret, lock)
# define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)

# endif
    135 
/*
 * REF_ASSERT_ISNT(test) aborts the process via OPENSSL_die() when the
 * reference-count sanity check |test| evaluates true; it compiles to
 * nothing in NDEBUG or no-stdio builds.
 */
# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif
    142 
/*
 * REF_PRINT_COUNT(a, b) traces reference-count changes to stderr when
 * REF_PRINT is defined; |b| must be a pointer to a struct with a
 * |references| field and |a| is a label string.  No-op otherwise.
 */
# ifdef REF_PRINT
#  define REF_PRINT_COUNT(a, b) \
        fprintf(stderr, "%p:%4d:%s\n", b, b->references, a)
# else
#  define REF_PRINT_COUNT(a, b)
# endif
    149 
    150 #endif
    151