Lines matching references to ec (the per-CPU entropy state, struct entropy_cpu):
478 struct entropy_cpu *ec = ptr;
481 ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
482 ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
483 ec->ec_bitspending = 0;
484 ec->ec_samplespending = 0;
485 ec->ec_locked = false;
489 evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
491 evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
493 evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
505 struct entropy_cpu *ec = ptr;
513 explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
515 evcnt_detach(&ec->ec_evcnt->intrtrunc);
516 evcnt_detach(&ec->ec_evcnt->intrdrop);
517 evcnt_detach(&ec->ec_evcnt->softint);
519 kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
520 kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
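The two groups above are the per-CPU constructor and destructor: the constructor kmem-allocates the event counters and zero-allocates the pool, then attaches the softint/intrdrop/intrtrunc counters; the destructor detaches the counters in reverse order, wipes the pool with explicit_memset before freeing it, and frees both allocations. The registration call itself is not among the matched lines; the following is only a sketch of how such hooks are conventionally wired up with percpu_create(9). entropy_percpu appears in later fragments, but the names entropy_init_cpu and entropy_fini_cpu are assumptions.

    static percpu_t *entropy_percpu;

    static void entropy_init_cpu(void *, void *, struct cpu_info *);
    static void entropy_fini_cpu(void *, void *, struct cpu_info *);

    /* In the subsystem's init path (sketch, not a quote of the source):
     * size the per-CPU area for struct entropy_cpu and let percpu(9)
     * run the ctor/dtor on each CPU as it comes and goes. */
    entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
        entropy_init_cpu, entropy_fini_cpu, NULL);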
524 * ec = entropy_cpu_get(&lock)
525 * entropy_cpu_put(&lock, ec)
538 struct entropy_cpu *ec;
540 ec = percpu_getref(entropy_percpu);
542 KASSERT(!ec->ec_locked);
543 ec->ec_locked = true;
547 return ec;
551 entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
554 KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
555 KASSERT(ec->ec_locked);
559 ec->ec_locked = false;
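The comment fragment above (ec = entropy_cpu_get(&lock), entropy_cpu_put(&lock, ec)) spells out the access discipline: get takes a reference to this CPU's state via percpu_getref, asserts the state is not already claimed, and sets ec_locked; put asserts the caller still holds this CPU's state and clears the flag. A minimal caller sketch, assuming only what the matched lines show (buf/len stand for whatever the caller is mixing in):

    struct entropy_cpu_lock lock;
    struct entropy_cpu *ec;

    ec = entropy_cpu_get(&lock);
    /* ec_locked is now set, so the hard-interrupt path on this CPU
     * will drop its samples rather than touch ec_pool concurrently. */
    entpool_enter(ec->ec_pool, buf, len);
    entropy_cpu_put(&lock, ec);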
713 * entropy_account_cpu(ec)
727 * access to ec. Will acquire and release the global lock.
730 entropy_account_cpu(struct entropy_cpu *ec)
755 KASSERT(ec0 == ec);
757 if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
759 } else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
771 entpool_extract(ec->ec_pool, buf, sizeof buf);
773 atomic_store_relaxed(&ec->ec_bitspending, 0);
774 atomic_store_relaxed(&ec->ec_samplespending, 0);
785 bitsdiff = MIN(ec->ec_bitspending,
789 samplesdiff = MIN(ec->ec_samplespending,
799 "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
801 " ec->ec_samplespending=%u"
803 bitsdiff, E->bitspending, ec->ec_bitspending,
804 samplesdiff, E->samplespending, ec->ec_samplespending,
811 atomic_store_relaxed(&ec->ec_bitspending,
812 ec->ec_bitspending - bitsdiff);
817 atomic_store_relaxed(&ec->ec_samplespending,
818 ec->ec_samplespending - samplesdiff);
850 entropy_cpu_put(&lock, ec);
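Per its comment fragment, entropy_account_cpu(ec) consolidates the calling CPU's pending entropy into the global estimate and may acquire and release the global lock. The matched lines show three outcomes: nothing pending (return early), enough pending bits to satisfy the global need (entpool_extract the per-CPU pool into a buffer and zero both pending counters), or a partial transfer bounded by MIN(). The bounds of the MIN() calls fall on unmatched lines, so the sketch below writes them as placeholder caps; only the debit pattern itself is taken from the fragments.

    /* Sketch only: bitscap/samplescap are placeholders for the real
     * global bounds, which are not among the matched lines. */
    bitsdiff = MIN(ec->ec_bitspending, bitscap - E->bitspending);
    samplesdiff = MIN(ec->ec_samplespending, samplescap - E->samplespending);

    E->bitspending += bitsdiff;
    E->samplespending += samplesdiff;

    /* Publish the per-CPU debit with relaxed stores; lockless readers
     * use atomic_load_relaxed on these counters. */
    atomic_store_relaxed(&ec->ec_bitspending,
        ec->ec_bitspending - bitsdiff);
    atomic_store_relaxed(&ec->ec_samplespending,
        ec->ec_samplespending - samplesdiff);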
920 struct entropy_cpu *ec;
954 ec = entropy_cpu_get(&lock);
955 entpool_enter(ec->ec_pool, buf, len);
956 bitspending = ec->ec_bitspending;
958 atomic_store_relaxed(&ec->ec_bitspending, bitspending);
959 samplespending = ec->ec_samplespending;
962 atomic_store_relaxed(&ec->ec_samplespending, samplespending);
964 entropy_cpu_put(&lock, ec);
968 entropy_account_cpu(ec);
992 struct entropy_cpu *ec;
1023 ec = percpu_getref(entropy_percpu);
1024 if (ec->ec_locked) {
1025 ec->ec_evcnt->intrdrop.ev_count++;
1028 ec->ec_locked = true;
1035 if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
1038 ec->ec_evcnt->intrtrunc.ev_count++;
1050 bitspending = ec->ec_bitspending;
1052 atomic_store_relaxed(&ec->ec_bitspending, bitspending);
1054 samplespending = ec->ec_samplespending;
1056 atomic_store_relaxed(&ec->ec_samplespending, samplespending);
1067 KASSERT(ec->ec_locked);
1069 ec->ec_locked = false;
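The interrupt-path fragments show what ec_locked is for: at hard-interrupt level the code cannot block on the per-CPU state, so the flag acts as a try-lock. If it is already set, the thread or softint path on the same CPU owns the state, so the sample is dropped and the intrdrop counter bumped. Otherwise the flag is set, the sample is mixed in with entpool_enter_nostir (stirring is deferred to the softint), intrtrunc counts the case where the pool could not take all of it, the pending counters are updated with relaxed stores, and the flag is cleared. A condensed sketch of that control flow, assuming nothing beyond the matched lines (the percpu_putref pairing is an assumption):

    ec = percpu_getref(entropy_percpu);
    if (ec->ec_locked) {
        /* Another path on this CPU owns the state: drop the sample. */
        ec->ec_evcnt->intrdrop.ev_count++;
        goto out;
    }
    ec->ec_locked = true;

    if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
        /* Pool could not absorb everything without stirring. */
        ec->ec_evcnt->intrtrunc.ev_count++;
    }

    ec->ec_locked = false;
out:
    percpu_putref(entropy_percpu);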
1088 struct entropy_cpu *ec;
1096 ec = entropy_cpu_get(&lock);
1097 ec->ec_evcnt->softint.ev_count++;
1098 entpool_stir(ec->ec_pool);
1099 bitspending = ec->ec_bitspending;
1100 samplespending = ec->ec_samplespending;
1101 entropy_cpu_put(&lock, ec);
1105 entropy_account_cpu(ec);
1173 struct entropy_cpu *ec = ptr;
1178 cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);
1179 cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);
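These two loads are the reader side of the relaxed stores seen earlier: the pending counters are only modified under the ec_locked discipline on their own CPU, but they are read locklessly from elsewhere, so both sides use atomic_*_relaxed. The struct entropy_cpu *ec = ptr line above has the shape of a percpu(9) callback; the following sketch shows how such a cross-CPU sum could be driven with percpu_foreach(9). The callback and accumulator names are assumptions, and any saturation of the totals happens on unmatched lines.

    struct entropy_pending_count {
        uint32_t bitspending;
        uint32_t samplespending;
    };

    static void
    entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    {
        struct entropy_cpu *ec = ptr;
        struct entropy_pending_count *count = cookie;

        count->bitspending += atomic_load_relaxed(&ec->ec_bitspending);
        count->samplespending +=
            atomic_load_relaxed(&ec->ec_samplespending);
    }

    /* Driven as, e.g.:
     *      struct entropy_pending_count count = { 0, 0 };
     *      percpu_foreach(entropy_percpu, entropy_pending_cpu, &count);
     */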
1259 struct entropy_cpu *ec;
1272 ec = entropy_cpu_get(&lock);
1274 entpool_extract(ec->ec_pool, buf, sizeof buf);
1275 atomic_store_relaxed(&ec->ec_bitspending, 0);
1276 atomic_store_relaxed(&ec->ec_samplespending, 0);
1278 entropy_cpu_put(&lock, ec);
2443 struct entropy_cpu *ec;
2449 ec = entropy_cpu_get(&lock);
2450 ec->ec_bitspending = 0;
2451 ec->ec_samplespending = 0;
2452 entpool_enter(ec->ec_pool, &extra, sizeof extra);
2453 entropy_cpu_put(&lock, ec);