//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TSAN_SHADOW_H
#define TSAN_SHADOW_H

#include "tsan_defs.h"
#include "tsan_trace.h"

namespace __tsan {

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
23 1.1 mrg class FastState {
24 1.1 mrg public:
25 1.1 mrg FastState(u64 tid, u64 epoch) {
26 1.1 mrg x_ = tid << kTidShift;
27 1.1 mrg x_ |= epoch;
28 1.1 mrg DCHECK_EQ(tid, this->tid());
29 1.1 mrg DCHECK_EQ(epoch, this->epoch());
30 1.1 mrg DCHECK_EQ(GetIgnoreBit(), false);
31 1.1 mrg }
32 1.1 mrg
33 1.1 mrg explicit FastState(u64 x) : x_(x) {}
34 1.1 mrg
35 1.1 mrg u64 raw() const { return x_; }
36 1.1 mrg
37 1.1 mrg u64 tid() const {
38 1.1 mrg u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
39 1.1 mrg return res;
40 1.1 mrg }
41 1.1 mrg
42 1.1 mrg u64 TidWithIgnore() const {
43 1.1 mrg u64 res = x_ >> kTidShift;
44 1.1 mrg return res;
45 1.1 mrg }
46 1.1 mrg
47 1.1 mrg u64 epoch() const {
48 1.1 mrg u64 res = x_ & ((1ull << kClkBits) - 1);
49 1.1 mrg return res;
50 1.1 mrg }
51 1.1 mrg
52 1.1 mrg void IncrementEpoch() {
53 1.1 mrg u64 old_epoch = epoch();
54 1.1 mrg x_ += 1;
55 1.1 mrg DCHECK_EQ(old_epoch + 1, epoch());
56 1.1 mrg (void)old_epoch;
57 1.1 mrg }
58 1.1 mrg
59 1.1 mrg void SetIgnoreBit() { x_ |= kIgnoreBit; }
60 1.1 mrg void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
61 1.1 mrg bool GetIgnoreBit() const { return (s64)x_ < 0; }
62 1.1 mrg
63 1.1 mrg void SetHistorySize(int hs) {
64 1.1 mrg CHECK_GE(hs, 0);
65 1.1 mrg CHECK_LE(hs, 7);
66 1.1 mrg x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
67 1.1 mrg }
68 1.1 mrg
69 1.1 mrg ALWAYS_INLINE
70 1.1 mrg int GetHistorySize() const {
71 1.1 mrg return (int)((x_ >> kHistoryShift) & kHistoryMask);
72 1.1 mrg }
73 1.1 mrg
74 1.1 mrg void ClearHistorySize() { SetHistorySize(0); }
75 1.1 mrg
76 1.1 mrg ALWAYS_INLINE
77 1.1 mrg u64 GetTracePos() const {
78 1.1 mrg const int hs = GetHistorySize();
79 1.1 mrg // When hs == 0, the trace consists of 2 parts.
80 1.1 mrg const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
81 1.1 mrg return epoch() & mask;
82 1.1 mrg }
83 1.1 mrg
84 1.1 mrg private:
85 1.1 mrg friend class Shadow;
86 1.1 mrg static const int kTidShift = 64 - kTidBits - 1;
87 1.1 mrg static const u64 kIgnoreBit = 1ull << 63;
88 1.1 mrg static const u64 kFreedBit = 1ull << 63;
89 1.1 mrg static const u64 kHistoryShift = kClkBits;
90 1.1 mrg static const u64 kHistoryMask = 7;
91 1.1 mrg u64 x_;
92 1.1 mrg };
93 1.1 mrg
// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) {}

  // Builds a shadow value from the thread's FastState (tid + epoch).
  // The history-size bits of FastState overlap the addr0/size_log bits of
  // Shadow, so they must be cleared before the access info is written.
  explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); }

  // Records the access offset within the 8-byte shadow cell (addr0, 0..7)
  // and log2 of the access size (0..3) into the 5 bits just above the epoch.
  // The first DCHECK requires those bits to still be zero (write-once).
  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  // Note the inverted encoding: the bit is set for reads, clear for writes,
  // and must not have been set before (write-once).
  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  // Marks the access as atomic; must not have been set before (write-once).
  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const { return x_ & kAtomicBit; }

  bool IsZero() const { return x_ == 0; }

  // Compares the tid fields including the freed/ignore msb, so a shadow
  // value describing freed memory never matches a live thread's accesses.
  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  // Single-mask equality check of the combined 5-bit addr0+size_log field.
  static ALWAYS_INLINE bool Addr0AndSizeAreEqual(const Shadow s1,
                                                 const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  // Branch-light overlap test of the byte ranges touched by s1 and s2.
  // kS2AccessSize is passed explicitly (the DCHECK against the slow path
  // implies it must equal s2.size(); callers typically pass a compile-time
  // constant). The unsigned subtraction relies on two's-complement: when
  // s1.addr0 < s2.addr0, diff wraps and (s64)diff is negative, and -diff
  // recovers the positive distance.
  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                               unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
      // if (s1.addr0() + size1) > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise unaccessible) we write to the shadow
  // values with tid/epoch related to the free and the freed bit set.
  // During memory accesses processing the freed bit is considered
  // as msb of tid. So any access races with shadow with freed bit set
  // (it is as if write from a thread with which we never synchronized before).
  // This allows us to detect accesses to freed memory w/o additional
  // overheads in memory access processing and at the same time restore
  // tid/epoch of free.
  void MarkAsFreed() { x_ |= kFreedBit; }

  bool IsFreed() const { return x_ & kFreedBit; }

  // Returns whether the freed bit was set, clearing it as a side effect.
  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  // True iff both the stored access and the incoming one are reads, or both
  // are atomic — i.e. the pair cannot constitute a race. Computed with a
  // single mask; the DCHECK restates it in the obvious form.
  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) |
                   (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  // The adjacent (is_atomic, is_read) bits form a 2-bit "weakness" rank
  // (atomic is weaker than non-atomic, read weaker than write).
  // True iff the stored access is at least as strong as the incoming one.
  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
                     (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  // Dual of IsRWNotWeaker: true iff the stored access is at most as strong
  // as the incoming one (same 2-bit rank comparison, reversed).
  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
                     (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  // Readable reference implementation; used only inside DCHECKs to validate
  // the branch-light fast path in TwoRangesIntersect.
  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0())
      return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
228 1.1 mrg
229 1.1 mrg const RawShadow kShadowRodata = (RawShadow)-1; // .rodata shadow marker
230 1.1 mrg
}  // namespace __tsan

#endif
234