    Searched defs:epoch (Results 1 - 15 of 15) sorted by relevancy

  /src/sys/external/bsd/common/linux/
linux_srcu.c 45 * All new srcu read sections get counted in the active epoch.
47 * epoch has zero readers. When a thread calls synchronize_srcu,
50 * number of readers in the now-draining epoch, and waits for the
58 * epoch.
135 unsigned epoch = gen & 1; /* active epoch */ local in function:srcu_adjust
138 cpu->src_count[epoch] += delta;
232 * readers on this CPU in the inactive epoch to the global count
240 unsigned gen, epoch; local in function:synchronize_srcu_xc
247 epoch = 1 ^ (gen & 1); /* draining epoch */
    [all...]
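
The linux_srcu.c fragments above describe the two-epoch scheme: readers are counted in whichever epoch is active, and synchronize_srcu flips the active bit, then waits for the now-draining epoch's reader count to reach zero. A minimal sketch of that flip-and-drain idea follows; the names (srcu_gen, srcu_count) are illustrative, and the real code uses per-CPU counters and handles the read-lock/flip race, which this sketch does not.

    #include <stdatomic.h>
    #include <sched.h>

    static atomic_uint srcu_gen;        /* low bit selects the active epoch */
    static atomic_int  srcu_count[2];   /* readers per epoch */

    int
    srcu_read_lock_sketch(void)
    {
        unsigned epoch = atomic_load(&srcu_gen) & 1;    /* active epoch */
        atomic_fetch_add(&srcu_count[epoch], 1);
        return epoch;
    }

    void
    srcu_read_unlock_sketch(int epoch)
    {
        atomic_fetch_sub(&srcu_count[epoch], 1);
    }

    void
    synchronize_srcu_sketch(void)
    {
        /* Flip the active epoch; new readers use the other counter. */
        unsigned gen = atomic_fetch_add(&srcu_gen, 1);
        unsigned draining = gen & 1;    /* the previously active epoch */

        /* Wait until the now-draining epoch has zero readers. */
        while (atomic_load(&srcu_count[draining]) != 0)
            sched_yield();
    }
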
  /src/sys/external/bsd/compiler_rt/dist/lib/tsan/rtl/
tsan_mutexset.h 28 u64 epoch; member in struct:__tsan::MutexSet::Desc
35 void Add(u64 id, bool write, u64 epoch);
60 void MutexSet::Add(u64 id, bool write, u64 epoch) {}
tsan_clock.h 69 u64 epoch : kClkBits; member in struct:__tsan::SyncClock::Dirty
tsan_clock.cc 147 if (clk_[tid] < dirty.epoch) {
148 clk_[tid] = dirty.epoch;
161 u64 epoch = src_elem.epoch; local in function:__tsan::ThreadClock::acquire
162 if (*dst_pos < epoch) {
163 *dst_pos = epoch;
200 if (dst->elem(tid_).epoch > last_acquire_) {
219 ce.epoch = max(ce.epoch, clk_[i]);
257 dst->dirty_[0].epoch = clk_[tid_];
    [all...]
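
In the TSan fragments above, an epoch is one slot of a vector clock: acquire pulls a sync object's clock into the thread's clock by taking the element-wise maximum (lines 161-163), and release does the reverse (line 219). A self-contained sketch of the acquire merge, with a fixed illustrative thread count:

    #include <stdint.h>

    #define NTHREADS 8      /* illustrative; TSan sizes clocks dynamically */

    /* Element-wise max-merge of a sync object's vector clock into a
     * thread's clock -- the core of an acquire operation. */
    static void
    clock_acquire_sketch(uint64_t thr_clk[NTHREADS],
        const uint64_t sync_clk[NTHREADS])
    {
        for (int tid = 0; tid < NTHREADS; tid++) {
            uint64_t epoch = sync_clk[tid];
            if (thr_clk[tid] < epoch)
                thr_clk[tid] = epoch;
        }
    }
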
tsan_defs.h 45 u64 epoch : kClkBits; member in struct:__tsan::ClockElem
tsan_rtl_mutex.cc 119 RestoreStack(last.tid(), last.epoch(), &trace, 0);
192 thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
287 thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
418 u64 epoch = tctx->epoch1; local in function:__tsan::UpdateClockCallback
420 epoch = tctx->thr->fast_state.epoch();
421 thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
439 // Can't increment epoch w/o writing to the trace as well.
451 // Can't increment epoch w/o writing to the trace as well.
461 u64 epoch = tctx->epoch1; local in function:__tsan::UpdateSleepClockCallback
    [all...]
tsan_rtl.cc 113 ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
117 : fast_state(tid, epoch)
562 unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
564 hdr->epoch0 = thr->fast_state.epoch();
629 return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
681 StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
717 old.epoch() > sync_epoch &&
759 // epoch[0:31] = sync_epoch[0:31]
760 // epoch[32:63] = sync_epoch[0:31]
761 // epoch[64:95] = sync_epoch[0:31]
763 const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0); local in function:__tsan::ContainsSameAccessFast
    [all...]
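
Lines 759-763 build a 128-bit vector with the 32-bit sync epoch replicated into all four lanes, so a single SIMD compare can check four shadow cells at once. With SSE2 intrinsics the broadcast step might look like this (a sketch; TSan wraps the shuffle in its own SHUF macro):

    #include <emmintrin.h>

    /* Replicate the low 32-bit lane of e into all four lanes:
     * epoch[0:31] = epoch[32:63] = epoch[64:95] = epoch[96:127]. */
    static inline __m128i
    broadcast_epoch_sketch(__m128i e)
    {
        return _mm_shuffle_epi32(e, _MM_SHUFFLE(0, 0, 0, 0));
    }
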
tsan_rtl.h 105 // epoch : kClkBits
108 FastState(u64 tid, u64 epoch) {
110 x_ |= epoch;
112 DCHECK_EQ(epoch, this->epoch());
134 u64 epoch() const { function in class:__tsan::FastState
140 u64 old_epoch = epoch();
142 DCHECK_EQ(old_epoch + 1, epoch());
170 return epoch() & mask;
190 // epoch : kClkBits
    [all...]
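
FastState packs the thread id and the epoch into a single u64, with the epoch in the low kClkBits bits, so epoch() is a mask and IncrementEpoch() is an add. A sketch of the same packing; the field width here is illustrative, not TSan's actual kClkBits:

    #include <stdint.h>

    #define KCLKBITS 42     /* illustrative epoch width */
    #define CLK_MASK ((UINT64_C(1) << KCLKBITS) - 1)

    static inline uint64_t
    fast_state_make(uint64_t tid, uint64_t epoch)
    {
        return (tid << KCLKBITS) | (epoch & CLK_MASK);
    }

    static inline uint64_t
    fast_state_epoch(uint64_t x)
    {
        return x & CLK_MASK;
    }

    static inline uint64_t
    fast_state_increment_epoch(uint64_t x)
    {
        /* Epoch sits in the low bits, so +1 bumps it directly;
         * overflow into the tid field is assumed not to occur. */
        return x + 1;
    }
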
  /src/sys/external/bsd/drm2/dist/drm/i915/
i915_globals.c 28 static atomic_t epoch; variable in typeref:typename:atomic_t
34 int epoch; member in struct:park_work
58 park.epoch = atomic_inc_return(&epoch);
70 if (park.epoch != atomic_read(&epoch)) {
145 atomic_inc(&epoch);
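
i915_globals.c uses a bare atomic counter as its epoch: the park worker records the counter when queued (line 58) and bails out if it has moved by the time the worker runs (line 70), meaning the driver became active again in between. A hedged sketch of that stale-work check:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int global_epoch;     /* bumped whenever activity resumes */

    struct park_work_sketch {
        int epoch;                      /* epoch when parking was queued */
    };

    /* Queue side: remember the epoch at which parking was requested. */
    static void
    queue_park_sketch(struct park_work_sketch *park)
    {
        park->epoch = atomic_fetch_add(&global_epoch, 1) + 1;
    }

    /* Worker side: skip the park if anything happened since queueing. */
    static bool
    should_park_sketch(const struct park_work_sketch *park)
    {
        return park->epoch == atomic_load(&global_epoch);
    }
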
  /src/sys/crypto/cprng_fast/
cprng_fast.c 58 unsigned epoch;
90 cprng->epoch = entropy_epoch();
111 if (__predict_false(cprng->epoch != entropy_epoch()))
137 unsigned epoch = entropy_epoch();
147 cprng->epoch = epoch;
57 unsigned epoch; member in struct:cprng_fast
135 unsigned epoch = entropy_epoch(); local in function:cprng_fast_intr
  /src/sys/kern/
subr_cprng.c 251 /* Set the epoch uninitialized so we reseed on first use. */
269 cprng_strong_reseed(struct cprng_strong *cprng, unsigned epoch,
280 * big deal -- worst case, we rewind the entropy epoch here and
295 (*ccp)->cc_epoch = epoch;
302 unsigned epoch; local in function:cprng_strong
323 /* If the entropy epoch has changed, (re)seed. */
324 epoch = entropy_epoch();
325 if (__predict_false(epoch != cc->cc_epoch))
326 cprng_strong_reseed(cprng, epoch, &cc, &s);
331 cprng_strong_reseed(cprng, epoch, &cc, &s);
    [all...]
kern_entropy.c 59 * * The entropy epoch is the number that changes when we
189 unsigned epoch; /* (A) changes when needed -> 0 */ member in struct:__anon5258958b0108
202 .epoch = (unsigned)-1, /* -1 means entropy never consolidated */
404 "epoch", SYSCTL_DESCR("Entropy epoch"),
405 NULL, 0, &E->epoch, 0, KERN_ENTROPY_EPOCH, CTL_EOL);
667 * Returns the current entropy epoch. If this changes, you should
677 * unsigned epoch;
680 * unsigned epoch = entropy_epoch();
681 * if (__predict_false(epoch != foo->epoch))
1310 unsigned epoch; local in function:entropy_notify
    [all...]
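
The kern_entropy.c comment (lines 677-681) prescribes the pattern that cprng_fast.c and subr_cprng.c both follow: cache the epoch in the consumer and reseed whenever entropy_epoch() returns something else. Spelled out as a compilable sketch; struct foo and reseed() are placeholders taken from the comment, and only entropy_epoch() is real API:

    unsigned entropy_epoch(void);   /* real API, declared for the sketch */

    /* NetBSD's <sys/cdefs.h> provides this branch hint. */
    #define __predict_false(x) __builtin_expect((x) != 0, 0)

    struct foo {
        unsigned epoch;             /* entropy epoch at last (re)seed */
        /* ... keying material ... */
    };

    static void
    reseed(struct foo *foo)
    {
        /* draw fresh key material here */
    }

    static void
    foo_use_sketch(struct foo *foo)
    {
        unsigned epoch = entropy_epoch();

        /* If the entropy epoch has changed, (re)seed before use. */
        if (__predict_false(epoch != foo->epoch)) {
            reseed(foo);
            foo->epoch = epoch;
        }
        /* ... generate output ... */
    }
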
  /src/sys/external/bsd/compiler_rt/dist/lib/tsan/tests/unit/
tsan_clock_test.cc 73 ASSERT_EQ(sync.get_clean(i), ce.epoch);
428 u64 epoch = thr0[tid]->clock[tid] + 1; local in function:__tsan::ClockFuzzer
432 thr0[tid]->clock[tid] = epoch;
435 thr1[tid]->set(epoch);
  /src/sbin/routed/
main.c 74 struct timeval epoch; /* when started */ variable in typeref:struct:timeval
84 EPOCH+SUPPLY_INTERVAL, 0
127 epoch = clk;
128 epoch.tv_sec -= EPOCH;
129 now.tv_sec = EPOCH;
130 now_stale = EPOCH - STALE_TIME;
131 now_expire = EPOCH - EXPIRE_TIME;
132 now_garbage = EPOCH - GARBAGE_TIME;
330 intvl_random(&next_bcast, EPOCH+MIN_WAITTIME, EPOCH+SUPPLY_INTERVAL);
    [all...]
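
routed's epoch is a time base rather than a counter: at startup it biases the wall clock by the constant EPOCH so that "now" begins at EPOCH instead of 0, keeping derived deadlines such as EPOCH - STALE_TIME from going negative. A sketch of that biasing; the constant value here is illustrative:

    #include <sys/time.h>

    #define EPOCH_SKETCH 100000     /* illustrative bias, like routed's EPOCH */

    static struct timeval epoch_base;   /* startup wall clock minus the bias */

    static void
    clock_init_sketch(const struct timeval *clk)
    {
        epoch_base = *clk;
        epoch_base.tv_sec -= EPOCH_SKETCH;
    }

    /* "now" on the biased timescale: seconds since startup plus EPOCH. */
    static time_t
    now_sketch(const struct timeval *clk)
    {
        return clk->tv_sec - epoch_base.tv_sec;
    }
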
  /src/lib/libc/gen/
arc4random.c 497 * Return the current entropy epoch, from the sysctl node
498 * kern.entropy.epoch.
500 * The entropy epoch is never zero. Initially, or on error, it is
514 unsigned epoch = (unsigned)-1; local in function:entropy_epoch
515 size_t epochlen = sizeof(epoch);
517 if (sysctl(mib, __arraycount(mib), &epoch, &epochlen, NULL, 0) == -1)
519 if (epochlen != sizeof(epoch))
522 return epoch;
535 unsigned epoch = entropy_epoch(); local in function:arc4random_prng_addrandom
559 prng->arc4_epoch = epoch;
    [all...]
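
arc4random.c reads the entropy epoch from userland through the kern.entropy.epoch sysctl node, returning (unsigned)-1 initially or on error per the comment at lines 498-500. A sketch of the same read using sysctlbyname(3) instead of the mib array in the snippet:

    #include <sys/param.h>
    #include <sys/sysctl.h>

    static unsigned
    entropy_epoch_sketch(void)
    {
        unsigned epoch = (unsigned)-1;
        size_t epochlen = sizeof(epoch);

        if (sysctlbyname("kern.entropy.epoch", &epoch, &epochlen,
            NULL, 0) == -1)
            return (unsigned)-1;
        if (epochlen != sizeof(epoch))
            return (unsigned)-1;
        return epoch;
    }
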

Completed in 44 milliseconds