/*	$NetBSD: kern_entropy.c,v 1.74 2026/01/04 01:32:52 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 *
 *	* While cold, a single global entropy pool is available for
 *	  entering and extracting, serialized through splhigh/splx.
 *	  The per-CPU entropy pool data structures are initialized in
 *	  entropy_init and entropy_init_late (separated mainly for
 *	  hysterical raisins at this point), but are not used until the
 *	  system is warm, at which point access to the global entropy
 *	  pool is limited to thread and softint context and serialized
 *	  by E->lock.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.74 2026/01/04 01:32:52 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/reboot.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/sdt.h>
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

#define	MINENTROPYBYTES	ENTROPY_CAPACITY
#define	MINENTROPYBITS	(MINENTROPYBYTES*NBBY)
#define	MINSAMPLES	(2*MINENTROPYBITS)
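
/*
 * For concreteness (illustrative note, assuming the usual definition
 * of ENTROPY_CAPACITY as 32 bytes in sys/entropy.h): MINENTROPYBYTES
 * then works out to 32, MINENTROPYBITS to 256, and MINSAMPLES to 512
 * -- that is, `full entropy' means 256 bits credited by sources, or,
 * failing that, 512 timing samples counted as a best-effort fallback.
 */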

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct entropy_cpu_evcnt {
		struct evcnt	softint;
		struct evcnt	intrdrop;
		struct evcnt	intrtrunc;
	}		*ec_evcnt;
	struct entpool	*ec_pool;
	unsigned	ec_bitspending;
	unsigned	ec_samplespending;
	bool		ec_locked;
};

/*
 * struct entropy_cpu_lock
 *
 *	State for locking the per-CPU entropy state.
 */
struct entropy_cpu_lock {
	int		ecl_s;
	long		ecl_pctr;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned	rc_entropybits;
	unsigned	rc_timesamples;
	unsigned	rc_datasamples;
	rnd_delta_t	rc_timedelta;
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	bitsneeded;	/* (A) needed globally */
	unsigned	bitspending;	/* pending in per-CPU pools */
	unsigned	samplesneeded;	/* (A) needed globally */
	unsigned	samplespending;	/* pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
	LIST_HEAD(,krndsource)	sources; /* list of entropy sources */
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.  */
	.bitsneeded = MINENTROPYBITS,
	.samplesneeded = MINSAMPLES,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
};

#define	E	(&entropy_global)	/* declutter */

/* Read-mostly globals */
static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
static void		*entropy_sih __read_mostly; /* softint handler */
static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */

static struct krndsource seed_rndsource __read_mostly;

/*
 * Event counters
 *
 *	Must be careful with adding these because they can serve as
 *	side channels.
 */
static struct evcnt entropy_discretionary_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
static struct evcnt entropy_immediate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
static struct evcnt entropy_partial_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
static struct evcnt entropy_consolidate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
static struct evcnt entropy_extract_fail_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
static struct evcnt entropy_request_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
EVCNT_ATTACH_STATIC(entropy_request_evcnt);
static struct evcnt entropy_deplete_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
static struct evcnt entropy_notify_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
EVCNT_ATTACH_STATIC(entropy_notify_evcnt);

/* Sysctl knobs */
static bool	entropy_collection = 1;
static bool	entropy_depletion = 0; /* Silly!  */

static const struct sysctlnode	*entropy_sysctlroot;
static struct sysctllog		*entropy_sysctllog;

/* Forward declarations */
static void	entropy_init_cpu(void *, void *, struct cpu_info *);
static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
static void	entropy_account_cpu(struct entropy_cpu *);
static void	entropy_enter(const void *, size_t, unsigned, bool);
static bool	entropy_enter_intr(const void *, size_t, unsigned, bool);
static void	entropy_softintr(void *);
static void	entropy_thread(void *);
static bool	entropy_pending(void);
static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
static void	entropy_do_consolidate(void);
static void	entropy_consolidate_xc(void *, void *);
static void	entropy_notify(void);
static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
static void	filt_entropy_read_detach(struct knote *);
static int	filt_entropy_read_event(struct knote *, long);
static int	entropy_request(size_t, int);
static void	rnd_add_data_internal(struct krndsource *, const void *,
		    uint32_t, uint32_t, bool);
static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
		    uint32_t, bool, uint32_t, bool);
static unsigned	rndsource_entropybits(struct krndsource *);
static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
static void	rndsource_to_user(struct krndsource *, rndsource_t *);
static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);

/*
 * entropy_timer()
 *
 *	Cycle counter, time counter, or anything that changes a wee bit
 *	unpredictably.
 */
static inline uint32_t
entropy_timer(void)
{
	struct bintime bt;
	uint32_t v;

	/* If we have a CPU cycle counter, use the low 32 bits.  */
#ifdef __HAVE_CPU_COUNTER
	if (__predict_true(cpu_hascounter()))
		return cpu_counter32();
#endif	/* __HAVE_CPU_COUNTER */

	/* If we're cold, tough.  Can't binuptime while cold.  */
	if (__predict_false(cold))
		return 0;

	/* Fold the 128 bits of binuptime into 32 bits.  */
	binuptime(&bt);
	v = bt.frac;
	v ^= bt.frac >> 32;
	v ^= bt.sec;
	v ^= bt.sec >> 32;
	return v;
}

static void
attach_seed_rndsource(void)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * First called no later than entropy_init, while we are still
	 * single-threaded, so no need for RUN_ONCE.
	 */
	if (E->seed_rndsource)
		return;

	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
	E->seed_rndsource = true;
}

/*
 * entropy_init()
 *
 *	Initialize the entropy subsystem.  Panic on failure.
 *
 *	Requires percpu(9) and sysctl(9) to be initialized.  Must run
 *	while cold.
 */
static void
entropy_init(void)
{
	uint32_t extra[2];
	struct krndsource *rs;
	unsigned i = 0;

	KASSERT(cold);

	/* Grab some cycle counts early at boot.  */
	extra[i++] = entropy_timer();

	/* Run the entropy pool cryptography self-test.  */
	if (entpool_selftest() == -1)
		panic("entropy pool crypto self-test failed");

	/* Create the sysctl directory.  */
	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
	    SYSCTL_DESCR("Entropy (random number sources) options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_ENTROPY, CTL_EOL);

	/* Create the sysctl knobs.  */
	/* XXX These shouldn't be writable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
	    SYSCTL_DESCR("Trigger entropy consolidation now"),
	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	/* XXX These should maybe not be readable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "needed",
	    SYSCTL_DESCR("Systemwide entropy deficit (bits of entropy)"),
	    NULL, 0, &E->bitsneeded, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "pending",
	    SYSCTL_DESCR("Number of bits of entropy pending on CPUs"),
	    NULL, 0, &E->bitspending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "samplesneeded",
	    SYSCTL_DESCR("Systemwide entropy deficit (samples)"),
	    NULL, 0, &E->samplesneeded, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "samplespending",
	    SYSCTL_DESCR("Number of samples pending on CPUs"),
	    NULL, 0, &E->samplespending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT,
	    "epoch", SYSCTL_DESCR("Entropy epoch"),
	    NULL, 0, &E->epoch, 0, KERN_ENTROPY_EPOCH, CTL_EOL);

	/* Initialize the global state for multithreaded operation.  */
	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
	cv_init(&E->cv, "entropy");
	selinit(&E->selq);
	cv_init(&E->sourcelock_cv, "entsrclock");

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Note if the bootloader didn't provide a seed.  */
	if (!E->seeded)
		aprint_debug("entropy: no seed from bootloader\n");

	/* Allocate the per-CPU records for all early entropy sources.  */
	LIST_FOREACH(rs, &E->sources, list)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));

	/* Allocate and initialize the per-CPU state.  */
	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
	    entropy_init_cpu, entropy_fini_cpu, NULL);

	/* Enter the boot cycle count to get started.  */
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, /*nbits*/0, /*count*/false);
	explicit_memset(extra, 0, sizeof extra);
}

/*
 * entropy_init_late()
 *
 *	Late initialization.  Panic on failure.
 *
 *	Requires CPUs to have been detected and LWPs to have started.
 *	Must run while cold.
 */
static void
entropy_init_late(void)
{
	int error;

	KASSERT(cold);

	/*
	 * Establish the softint at the highest softint priority level.
	 * Must happen after CPU detection.
	 */
	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    &entropy_softintr, NULL);
	if (entropy_sih == NULL)
		panic("unable to establish entropy softint");

	/*
	 * Create the entropy housekeeping thread.  Must happen after
	 * lwpinit.
	 */
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
	    entropy_thread, NULL, &entropy_lwp, "entbutler");
	if (error)
		panic("unable to create entropy housekeeping thread: %d",
		    error);
}

/*
 * entropy_init_cpu(ptr, cookie, ci)
 *
 *	percpu(9) constructor for per-CPU entropy pool.
 */
static void
entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;
	const char *cpuname;

	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
	ec->ec_bitspending = 0;
	ec->ec_samplespending = 0;
	ec->ec_locked = false;

	/* XXX ci_cpuname may not be initialized early enough.  */
	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy softint");
	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy intrdrop");
	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy intrtrunc");
}

/*
 * entropy_fini_cpu(ptr, cookie, ci)
 *
 *	percpu(9) destructor for per-CPU entropy pool.
 */
static void
entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;

	/*
	 * Zero any lingering data.  Disclosure of the per-CPU pool
	 * shouldn't retroactively affect the security of any keys
	 * generated, because entpool(9) erases whatever we have just
	 * drawn out of any pool, but better safe than sorry.
	 */
	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));

	evcnt_detach(&ec->ec_evcnt->intrtrunc);
	evcnt_detach(&ec->ec_evcnt->intrdrop);
	evcnt_detach(&ec->ec_evcnt->softint);

	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
}

/*
 * ec = entropy_cpu_get(&lock)
 * entropy_cpu_put(&lock, ec)
 *
 *	Lock and unlock the per-CPU entropy state.  This only prevents
 *	access on the same CPU -- by hard interrupts, by soft
 *	interrupts, or by other threads.
 *
 *	Blocks soft interrupts and preemption altogether; doesn't block
 *	hard interrupts, but causes samples in hard interrupts to be
 *	dropped.
 */
static struct entropy_cpu *
entropy_cpu_get(struct entropy_cpu_lock *lock)
{
	struct entropy_cpu *ec;

	ec = percpu_getref(entropy_percpu);
	lock->ecl_s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	lock->ecl_pctr = lwp_pctr();
	__insn_barrier();

	return ec;
}

static void
entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
{

	KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
	KASSERT(ec->ec_locked);

	__insn_barrier();
	KASSERT(lock->ecl_pctr == lwp_pctr());
	ec->ec_locked = false;
	splx(lock->ecl_s);
	percpu_putref(entropy_percpu);
}
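
/*
 * Usage sketch (illustrative only): the pattern the rest of this file
 * follows with entropy_cpu_get/entropy_cpu_put -- grab the per-CPU
 * state, touch the pool and the pending counters, and release it
 * before doing anything that might block or migrate:
 *
 *	struct entropy_cpu_lock lock;
 *	struct entropy_cpu *ec;
 *
 *	ec = entropy_cpu_get(&lock);
 *	entpool_enter(ec->ec_pool, buf, len);
 *	entropy_cpu_put(&lock, ec);
 */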

/*
 * entropy_seed(seed)
 *
 *	Seed the entropy pool with seed.  Meant to be called as early
 *	as possible by the bootloader; may be called before or after
 *	entropy_init.  Must be called before system reaches userland.
 *	Must be called in thread or soft interrupt context, not in hard
 *	interrupt context.  Must be called at most once.
 *
 *	Overwrites the seed in place.  Caller may then free the memory.
 */
static void
entropy_seed(rndsave_t *seed)
{
	SHA1_CTX ctx;
	uint8_t digest[SHA1_DIGEST_LENGTH];
	bool seeded;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * Verify the checksum.  If the checksum fails, take the data
	 * but ignore the entropy estimate -- the file may have been
	 * incompletely written with garbage, which is harmless to add
	 * but may not be as unpredictable as alleged.
	 */
	SHA1Init(&ctx);
	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
	SHA1Update(&ctx, seed->data, sizeof(seed->data));
	SHA1Final(digest, &ctx);
	CTASSERT(sizeof(seed->digest) == sizeof(digest));
	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
		printf("entropy: invalid seed checksum\n");
		seed->entropy = 0;
	}
	explicit_memset(&ctx, 0, sizeof ctx);
	explicit_memset(digest, 0, sizeof digest);

	/*
	 * If the entropy is insensibly large, try byte-swapping.
	 * Otherwise assume the file is corrupted and act as though it
	 * has zero entropy.
	 */
	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
		seed->entropy = bswap32(seed->entropy);
		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
			seed->entropy = 0;
	}

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Test and set E->seeded.  */
	seeded = E->seeded;
	E->seeded = (seed->entropy > 0);

	/*
	 * If we've been seeded, may be re-entering the same seed
	 * (e.g., bootloader vs module init, or something).  No harm in
	 * entering it twice, but it contributes no additional entropy.
	 */
	if (seeded) {
		printf("entropy: double-seeded by bootloader\n");
		seed->entropy = 0;
	} else {
		printf("entropy: entering seed from bootloader"
		    " with %u bits of entropy\n", (unsigned)seed->entropy);
	}

	/* Enter it into the pool and promptly zero it.  */
	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
	    seed->entropy);
	explicit_memset(seed, 0, sizeof(*seed));
}

/*
 * entropy_bootrequest()
 *
 *	Request entropy from all sources at boot, once config is
 *	complete and interrupts are running but we are still cold.
 */
void
entropy_bootrequest(void)
{
	int error;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * Request enough to satisfy the maximum entropy shortage.
	 * This is harmless overkill if the bootloader provided a seed.
	 */
	error = entropy_request(MINENTROPYBYTES, ENTROPY_WAIT);
	KASSERTMSG(error == 0, "error=%d", error);
}

/*
 * entropy_epoch()
 *
 *	Returns the current entropy epoch.  If this changes, you should
 *	reseed.  If -1, means system entropy has not yet reached full
 *	entropy or been explicitly consolidated; never reverts back to
 *	-1.  Never zero, so you can always use zero as an uninitialized
 *	sentinel value meaning `reseed ASAP'.
 *
 *	Usage model:
 *
 *		struct foo {
 *			struct crypto_prng prng;
 *			unsigned epoch;
 *		} *foo;
 *
 *		unsigned epoch = entropy_epoch();
 *		if (__predict_false(epoch != foo->epoch)) {
 *			uint8_t seed[32];
 *			if (entropy_extract(seed, sizeof seed, 0) != 0)
 *				warn("no entropy");
 *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
 *			foo->epoch = epoch;
 *		}
 */
unsigned
entropy_epoch(void)
{

	/*
	 * Unsigned int, so no need for seqlock for an atomic read, but
	 * make sure we read it afresh each time.
	 */
	return atomic_load_relaxed(&E->epoch);
}

/*
 * entropy_ready()
 *
 *	True if the entropy pool has full entropy.
 */
bool
entropy_ready(void)
{

	return atomic_load_relaxed(&E->bitsneeded) == 0;
}

/*
 * entropy_account_cpu(ec)
 *
 *	Consider whether to consolidate entropy into the global pool
 *	after we just added some into the current CPU's pending pool.
 *
 *	- If this CPU can provide enough entropy now, do so.
 *
 *	- If this and whatever else is available on other CPUs can
 *	  provide enough entropy, kick the consolidation thread.
 *
 *	- Otherwise, do as little as possible, except maybe consolidate
 *	  entropy at most once a minute.
 *
 *	Caller must be bound to a CPU and therefore have exclusive
 *	access to ec.  Will acquire and release the global lock.
 */
static void
entropy_account_cpu(struct entropy_cpu *ec)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec0;
	unsigned bitsdiff, samplesdiff;

	KASSERT(!cpu_intr_p());
	KASSERT(!cold);
	KASSERT(curlwp->l_pflag & LP_BOUND);

	/*
	 * If there's no entropy needed, and entropy has been
	 * consolidated in the last minute, do nothing.
	 */
	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&
	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
	    __predict_true((time_uptime - E->timestamp) <= 60))
		return;

	/*
	 * Consider consolidation, under the global lock and with the
	 * per-CPU state locked.
	 */
	mutex_enter(&E->lock);
	ec0 = entropy_cpu_get(&lock);
	KASSERT(ec0 == ec);

	if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
		/* Raced with consolidation xcall.  Nothing to do.  */
	} else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
		/*
		 * If we have not yet attained full entropy but we can
		 * now, do so.  This way we disseminate entropy
		 * promptly when it becomes available early at boot;
		 * otherwise we leave it to the entropy consolidation
		 * thread, which is rate-limited to mitigate side
		 * channels and abuse.
		 */
		uint8_t buf[ENTPOOL_CAPACITY];

		/* Transfer from the local pool to the global pool.  */
		entpool_extract(ec->ec_pool, buf, sizeof buf);
		entpool_enter(&E->pool, buf, sizeof buf);
		atomic_store_relaxed(&ec->ec_bitspending, 0);
		atomic_store_relaxed(&ec->ec_samplespending, 0);
		atomic_store_relaxed(&E->bitsneeded, 0);
		atomic_store_relaxed(&E->samplesneeded, 0);

		/* Notify waiters that we now have full entropy.  */
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	} else {
		/* Determine how much we can add to the global pool.  */
		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
		    "E->bitspending=%u", E->bitspending);
		bitsdiff = MIN(ec->ec_bitspending,
		    MINENTROPYBITS - E->bitspending);
		KASSERTMSG(E->samplespending <= MINSAMPLES,
		    "E->samplespending=%u", E->samplespending);
		samplesdiff = MIN(ec->ec_samplespending,
		    MINSAMPLES - E->samplespending);

		/*
		 * This should make a difference unless we are already
		 * saturated.
		 */
		KASSERTMSG((bitsdiff || samplesdiff ||
			E->bitspending == MINENTROPYBITS ||
			E->samplespending == MINSAMPLES),
		    "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
		    "samplesdiff=%u E->samplespending=%u"
		    " ec->ec_samplespending=%u"
		    " minentropybits=%u minsamples=%u",
		    bitsdiff, E->bitspending, ec->ec_bitspending,
		    samplesdiff, E->samplespending, ec->ec_samplespending,
		    (unsigned)MINENTROPYBITS, (unsigned)MINSAMPLES);

		/* Add to the global, subtract from the local.  */
		E->bitspending += bitsdiff;
		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
		    "E->bitspending=%u", E->bitspending);
		atomic_store_relaxed(&ec->ec_bitspending,
		    ec->ec_bitspending - bitsdiff);

		E->samplespending += samplesdiff;
		KASSERTMSG(E->samplespending <= MINSAMPLES,
		    "E->samplespending=%u", E->samplespending);
		atomic_store_relaxed(&ec->ec_samplespending,
		    ec->ec_samplespending - samplesdiff);

		/* One or the other must have gone up from zero.  */
		KASSERT(E->bitspending || E->samplespending);

		if (E->bitsneeded <= E->bitspending ||
		    E->samplesneeded <= E->samplespending) {
			/*
			 * Enough bits or at least samples between all
			 * the per-CPU pools.  Leave a note for the
			 * housekeeping thread to consolidate entropy
			 * next time it wakes up -- and wake it up if
			 * this is the first time, to speed things up.
			 *
			 * If we don't need any entropy, this doesn't
			 * mean much, but it is the only time we ever
			 * gather additional entropy in case the
			 * accounting has been overly optimistic.  This
			 * happens at most once a minute, so there's
			 * negligible performance cost.
			 */
			E->consolidate = true;
			if (E->epoch == (unsigned)-1)
				cv_broadcast(&E->cv);
			if (E->bitsneeded == 0)
				entropy_discretionary_evcnt.ev_count++;
		} else {
			/* Can't get full entropy.  Keep gathering.  */
			entropy_partial_evcnt.ev_count++;
		}
	}

	entropy_cpu_put(&lock, ec);
	mutex_exit(&E->lock);
}

/*
 * entropy_enter_early(buf, len, nbits)
 *
 *	Do entropy bookkeeping globally, before we have established
 *	per-CPU pools.  Enter directly into the global pool in the hope
 *	that we enter enough before the first entropy_extract to thwart
 *	iterative-guessing attacks; entropy_extract will warn if not.
 */
static void
entropy_enter_early(const void *buf, size_t len, unsigned nbits)
{
	bool notify = false;
	int s;

	KASSERT(cold);

	/*
	 * We're early at boot before multithreading and multi-CPU
	 * operation, and we don't have softints yet to defer
	 * processing from interrupt context, so we have to enter the
	 * samples directly into the global pool.  But interrupts may
	 * be enabled, and we enter this path from interrupt context,
	 * so block interrupts until we're done.
	 */
	s = splhigh();

	/* Enter it into the pool.  */
	entpool_enter(&E->pool, buf, len);

	/*
	 * Decide whether to notify reseed -- we will do so if either:
	 * (a) we transition from partial entropy to full entropy, or
	 * (b) we get a batch of full entropy all at once.
	 * We don't count timing samples because we assume, while cold,
	 * there's not likely to be much jitter yet.
	 */
	notify |= (E->bitsneeded && E->bitsneeded <= nbits);
	notify |= (nbits >= MINENTROPYBITS);

	/*
	 * Subtract from the needed count and notify if appropriate.
	 * We don't count samples here because entropy_timer might
	 * still be returning zero at this point if there's no CPU
	 * cycle counter.
	 */
	E->bitsneeded -= MIN(E->bitsneeded, nbits);
	if (notify) {
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	}

	splx(s);
}

/*
 * entropy_enter(buf, len, nbits, count)
 *
 *	Enter len bytes of data from buf into the system's entropy
 *	pool, stirring as necessary when the internal buffer fills up.
 *	nbits is a lower bound on the number of bits of entropy in the
 *	process that led to this sample.
 */
static void
entropy_enter(const void *buf, size_t len, unsigned nbits, bool count)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
	unsigned bitspending, samplespending;
	int bound;

	KASSERTMSG(!cpu_intr_p(),
	    "use entropy_enter_intr from interrupt context");
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/*
	 * If we're still cold, just use entropy_enter_early to put
	 * samples directly into the global pool.
	 */
	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return;
	}

	/*
	 * Bind ourselves to the current CPU so we don't switch CPUs
	 * between entering data into the current CPU's pool (and
	 * updating the pending count) and transferring it to the
	 * global pool in entropy_account_cpu.
	 */
	bound = curlwp_bind();

	/*
	 * With the per-CPU state locked, enter into the per-CPU pool
	 * and count up what we can add.
	 *
	 * We don't count samples while cold because entropy_timer
	 * might still be returning zero if there's no CPU cycle
	 * counter.
	 */
	ec = entropy_cpu_get(&lock);
	entpool_enter(ec->ec_pool, buf, len);
	bitspending = ec->ec_bitspending;
	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
	samplespending = ec->ec_samplespending;
	if (__predict_true(count)) {
		samplespending += MIN(MINSAMPLES - samplespending, 1);
		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
	}
	entropy_cpu_put(&lock, ec);

	/* Consolidate globally if appropriate based on what we added.  */
	if (bitspending > 0 || samplespending >= MINSAMPLES)
		entropy_account_cpu(ec);

	curlwp_bindx(bound);
}

/*
 * entropy_enter_intr(buf, len, nbits, count)
 *
 *	Enter up to len bytes of data from buf into the system's
 *	entropy pool without stirring.  nbits is a lower bound on the
 *	number of bits of entropy in the process that led to this
 *	sample.  If the sample could be entered completely, assume
 *	nbits of entropy pending; otherwise assume none, since we don't
 *	know whether some parts of the sample are constant, for
 *	instance.  Schedule a softint to stir the entropy pool if
 *	needed.  Return true if used fully, false if truncated at all.
 *
 *	Using this in thread or softint context with no spin locks held
 *	will work, but you might as well use entropy_enter in that
 *	case.
 */
static bool
entropy_enter_intr(const void *buf, size_t len, unsigned nbits, bool count)
{
	struct entropy_cpu *ec;
	bool fullyused = false;
	uint32_t bitspending, samplespending;
	int s;

	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/*
	 * If we're still cold, just use entropy_enter_early to put
	 * samples directly into the global pool.
	 */
	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return true;
	}

	/*
	 * In case we were called in thread or interrupt context with
	 * interrupts unblocked, block soft interrupts up to
	 * IPL_SOFTSERIAL.  This way logic that is safe in interrupt
	 * context or under a spin lock is also safe in less
	 * restrictive contexts.
	 */
	s = splsoftserial();

	/*
	 * Acquire the per-CPU state.  If someone is in the middle of
	 * using it, drop the sample.  Otherwise, take the lock so that
	 * higher-priority interrupts will drop their samples.
	 */
	ec = percpu_getref(entropy_percpu);
	if (ec->ec_locked) {
		ec->ec_evcnt->intrdrop.ev_count++;
		goto out0;
	}
	ec->ec_locked = true;
	__insn_barrier();

	/*
	 * Enter as much as we can into the per-CPU pool.  If it was
	 * truncated, schedule a softint to stir the pool and stop.
	 */
	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
		if (__predict_true(!cold))
			softint_schedule(entropy_sih);
		ec->ec_evcnt->intrtrunc.ev_count++;
		goto out1;
	}
	fullyused = true;

	/*
	 * Count up what we can contribute.
	 *
	 * We don't count samples while cold because entropy_timer
	 * might still be returning zero if there's no CPU cycle
	 * counter.
	 */
	bitspending = ec->ec_bitspending;
	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
	if (__predict_true(count)) {
		samplespending = ec->ec_samplespending;
		samplespending += MIN(MINSAMPLES - samplespending, 1);
		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
	}

	/* Schedule a softint if we added anything and it matters.  */
	if (__predict_false(atomic_load_relaxed(&E->bitsneeded) ||
		atomic_load_relaxed(&entropy_depletion)) &&
	    (nbits != 0 || count) &&
	    __predict_true(!cold))
		softint_schedule(entropy_sih);

out1:	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
out0:	percpu_putref(entropy_percpu);
	splx(s);

	return fullyused;
}

/*
 * entropy_softintr(cookie)
 *
 *	Soft interrupt handler for entering entropy.  Takes care of
 *	stirring the local CPU's entropy pool if it filled up during
 *	hard interrupts, and promptly crediting entropy from the local
 *	CPU's entropy pool to the global entropy pool if needed.
 */
static void
entropy_softintr(void *cookie)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
	unsigned bitspending, samplespending;

	/*
	 * With the per-CPU state locked, stir the pool if necessary
	 * and determine if there's any pending entropy on this CPU to
	 * account globally.
	 */
	ec = entropy_cpu_get(&lock);
	ec->ec_evcnt->softint.ev_count++;
	entpool_stir(ec->ec_pool);
	bitspending = ec->ec_bitspending;
	samplespending = ec->ec_samplespending;
	entropy_cpu_put(&lock, ec);

	/* Consolidate globally if appropriate based on what we added.  */
	if (bitspending > 0 || samplespending >= MINSAMPLES)
		entropy_account_cpu(ec);
}

/*
 * entropy_thread(cookie)
 *
 *	Handle any asynchronous entropy housekeeping.
 */
static void
entropy_thread(void *cookie)
{
	bool consolidate;

#ifndef _RUMPKERNEL		/* XXX rump starts threads before cold */
	KASSERT(!cold);
#endif

	for (;;) {
		/*
		 * Wait until there's full entropy somewhere among the
		 * CPUs, as confirmed at most once per minute, or
		 * someone wants to consolidate.
		 */
		if (entropy_pending()) {
			consolidate = true;
		} else {
			mutex_enter(&E->lock);
			if (!E->consolidate)
				cv_timedwait(&E->cv, &E->lock, 60*hz);
			consolidate = E->consolidate;
			E->consolidate = false;
			mutex_exit(&E->lock);
		}

		if (consolidate) {
			/* Do it.  */
			entropy_do_consolidate();

			/* Mitigate abuse.  */
			kpause("entropy", false, hz, NULL);
		}
	}
}

struct entropy_pending_count {
	uint32_t bitspending;
	uint32_t samplespending;
};

/*
 * entropy_pending()
 *
 *	True if enough bits or samples are pending on other CPUs to
 *	warrant consolidation.
 */
static bool
entropy_pending(void)
{
	struct entropy_pending_count count = { 0, 0 }, *C = &count;

	percpu_foreach(entropy_percpu, &entropy_pending_cpu, C);
	return C->bitspending >= MINENTROPYBITS ||
	    C->samplespending >= MINSAMPLES;
}

static void
entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;
	struct entropy_pending_count *C = cookie;
	uint32_t cpu_bitspending;
	uint32_t cpu_samplespending;

	cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);
	cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);
	C->bitspending += MIN(MINENTROPYBITS - C->bitspending,
	    cpu_bitspending);
	C->samplespending += MIN(MINSAMPLES - C->samplespending,
	    cpu_samplespending);
}

/*
 * entropy_do_consolidate()
 *
 *	Issue a cross-call to gather entropy on all CPUs and advance
 *	the entropy epoch.
 */
static void
entropy_do_consolidate(void)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	struct entpool pool;
	uint8_t buf[ENTPOOL_CAPACITY];
	unsigned bitsdiff, samplesdiff;
	uint64_t ticket;

	KASSERT(!cold);
	ASSERT_SLEEPABLE();

	/* Gather entropy on all CPUs into a temporary pool.  */
	memset(&pool, 0, sizeof pool);
	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
	xc_wait(ticket);

	/* Acquire the lock to notify waiters.  */
	mutex_enter(&E->lock);

	/* Count another consolidation.  */
	entropy_consolidate_evcnt.ev_count++;

	/* Note when we last consolidated, i.e. now.  */
	E->timestamp = time_uptime;

	/* Mix what we gathered into the global pool.  */
	entpool_extract(&pool, buf, sizeof buf);
	entpool_enter(&E->pool, buf, sizeof buf);
	explicit_memset(&pool, 0, sizeof pool);

	/* Count the entropy that was gathered.  */
	bitsdiff = MIN(E->bitsneeded, E->bitspending);
	atomic_store_relaxed(&E->bitsneeded, E->bitsneeded - bitsdiff);
	E->bitspending -= bitsdiff;
	if (__predict_false(E->bitsneeded > 0) && bitsdiff != 0) {
		if ((boothowto & AB_DEBUG) != 0 &&
		    ratecheck(&lasttime, &interval)) {
			printf("WARNING:"
			    " consolidating less than full entropy\n");
		}
	}

	samplesdiff = MIN(E->samplesneeded, E->samplespending);
	atomic_store_relaxed(&E->samplesneeded,
	    E->samplesneeded - samplesdiff);
	E->samplespending -= samplesdiff;

	/* Advance the epoch and notify waiters.  */
	entropy_notify();

	/* Release the lock.  */
	mutex_exit(&E->lock);
}

/*
 * entropy_consolidate_xc(vpool, arg2)
 *
 *	Extract output from the local CPU's input pool and enter it
 *	into a temporary pool passed as vpool.
 */
static void
entropy_consolidate_xc(void *vpool, void *arg2 __unused)
{
	struct entpool *pool = vpool;
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
	uint8_t buf[ENTPOOL_CAPACITY];
	uint32_t extra[7];
	unsigned i = 0;

	/* Grab CPU number and cycle counter to mix extra into the pool.  */
	extra[i++] = cpu_number();
	extra[i++] = entropy_timer();

	/*
	 * With the per-CPU state locked, extract from the per-CPU pool
	 * and count it as no longer pending.
	 */
	ec = entropy_cpu_get(&lock);
	extra[i++] = entropy_timer();
	entpool_extract(ec->ec_pool, buf, sizeof buf);
	atomic_store_relaxed(&ec->ec_bitspending, 0);
	atomic_store_relaxed(&ec->ec_samplespending, 0);
	extra[i++] = entropy_timer();
	entropy_cpu_put(&lock, ec);
	extra[i++] = entropy_timer();

	/*
	 * Copy over statistics, and enter the per-CPU extract and the
	 * extra timing into the temporary pool, under the global lock.
	 */
	mutex_enter(&E->lock);
	extra[i++] = entropy_timer();
	entpool_enter(pool, buf, sizeof buf);
	explicit_memset(buf, 0, sizeof buf);
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entpool_enter(pool, extra, sizeof extra);
	explicit_memset(extra, 0, sizeof extra);
	mutex_exit(&E->lock);
}

/*
 * entropy_notify()
 *
 *	Caller just contributed entropy to the global pool.  Advance
 *	the entropy epoch and notify waiters.
 *
 *	Caller must hold the global entropy lock.
 */
static void
entropy_notify(void)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	static bool ready = false, besteffort = false;
	unsigned epoch;

	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));

	/*
	 * If this is the first time, print a message to the console
	 * that we're ready so operators can compare it to the timing
	 * of other events.
	 *
	 * If we didn't get full entropy from reliable sources, report
	 * instead that we are running on fumes with best effort.  (If
	 * we ever do get full entropy after that, print the ready
	 * message once.)
	 */
	if (__predict_false(!ready)) {
		if (E->bitsneeded == 0) {
			printf("entropy: ready\n");
			ready = true;
		} else if (E->samplesneeded == 0 && !besteffort) {
			printf("entropy: best effort\n");
			besteffort = true;
		}
	}

	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
	    ratecheck(&lasttime, &interval)) {
		epoch = E->epoch + 1;
		if (epoch == 0 || epoch == (unsigned)-1)
			epoch = 1;
		atomic_store_relaxed(&E->epoch, epoch);
	}
	KASSERT(E->epoch != (unsigned)-1);

	/* Notify waiters.  */
	if (__predict_true(!cold)) {
		cv_broadcast(&E->cv);
		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
	}

	/* Count another notification.  */
	entropy_notify_evcnt.ev_count++;
}

/*
 * entropy_consolidate()
 *
 *	Trigger entropy consolidation and wait for it to complete, or
 *	return EINTR if interrupted by a signal.
 *
 *	This should be used sparingly, not periodically -- requiring
 *	conscious intervention by the operator or a clear policy
 *	decision.  Otherwise, the kernel will automatically consolidate
 *	when enough entropy has been gathered into per-CPU pools to
 *	transition to full entropy.
 */
int
entropy_consolidate(void)
{
	uint64_t ticket;
	int error;

	KASSERT(!cold);
	ASSERT_SLEEPABLE();

	mutex_enter(&E->lock);
	ticket = entropy_consolidate_evcnt.ev_count;
	E->consolidate = true;
	cv_broadcast(&E->cv);
	while (ticket == entropy_consolidate_evcnt.ev_count) {
		error = cv_wait_sig(&E->cv, &E->lock);
		if (error)
			break;
	}
	mutex_exit(&E->lock);

	return error;
}

/*
 * sysctl -w kern.entropy.consolidate=1
 *
 *	Trigger entropy consolidation and wait for it to complete.
 *	Writable only by superuser.  This, writing to /dev/random, and
 *	ioctl(RNDADDDATA) are the only ways for the system to
 *	consolidate entropy if the operator knows something the kernel
 *	doesn't about how unpredictable the pending entropy pools are.
 */
static int
sysctl_entropy_consolidate(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int arg = 0;
	int error;

	node.sysctl_data = &arg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (arg)
		error = entropy_consolidate();

	return error;
}

/*
 * entropy_gather()
 *
 *	Trigger gathering entropy from all on-demand sources, and, if
 *	requested, wait for synchronous sources (but not asynchronous
 *	sources) to complete, or fail with EINTR if interrupted by a
 *	signal.
 */
int
entropy_gather(void)
{
	int error;

	mutex_enter(&E->lock);
	error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT|ENTROPY_SIG);
	mutex_exit(&E->lock);

	return error;
}

/*
 * sysctl -w kern.entropy.gather=1
 *
 *	Trigger gathering entropy from all on-demand sources, and wait
 *	for synchronous sources (but not asynchronous sources) to
 *	complete.  Writable only by superuser.
 */
static int
sysctl_entropy_gather(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int arg = 0;
	int error;

	node.sysctl_data = &arg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (arg)
		error = entropy_gather();

	return error;
}

/*
 * entropy_extract(buf, len, flags)
 *
 *	Extract len bytes from the global entropy pool into buf.
 *
 *	Caller MUST NOT expose these bytes directly -- must use them
 *	ONLY to seed a cryptographic pseudorandom number generator
 *	(`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'),
 *	and then erase them.  entropy_extract does not, on its own,
 *	provide backtracking resistance -- it must be combined with a
 *	PRNG/DRBG that does.
 *
 *	This may be used very early at boot, before even entropy_init
 *	has been called.
 *
 *	You generally shouldn't use this directly -- use cprng(9)
 *	instead.
 *
 *	Flags may have:
 *
 *		ENTROPY_WAIT	Wait for entropy if not available yet.
 *		ENTROPY_SIG	Allow interruption by a signal during wait.
 *		ENTROPY_HARDFAIL Either fill the buffer with full entropy,
 *				or fail without filling it at all.
 *
 *	Return zero on success, or error on failure:
 *
 *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
 *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
 *
 *	If ENTROPY_WAIT is set, allowed only in thread context.  If
 *	ENTROPY_WAIT is not set, allowed also in softint context -- may
 *	sleep on an adaptive lock up to IPL_SOFTSERIAL.  Forbidden in
 *	hard interrupt context.
 */
int
entropy_extract(void *buf, size_t len, int flags)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	bool printed = false;
	int s = -1/*XXXGCC*/, error;

	if (ISSET(flags, ENTROPY_WAIT)) {
		ASSERT_SLEEPABLE();
		KASSERT(!cold);
	}

	/* Refuse to operate in interrupt context.  */
	KASSERT(!cpu_intr_p());

	/*
	 * If we're cold, we are only contending with interrupts on the
	 * current CPU, so block them.  Otherwise, we are _not_
	 * contending with interrupts on the current CPU, but we are
	 * contending with other threads, to exclude them with a mutex.
	 */
	if (__predict_false(cold))
		s = splhigh();
	else
		mutex_enter(&E->lock);

	/* Wait until there is enough entropy in the system.  */
	error = 0;
	if (E->bitsneeded > 0 && E->samplesneeded == 0) {
		/*
		 * We don't have full entropy from reliable sources,
		 * but we gathered a plausible number of samples from
		 * other sources such as timers.  Try asking for more
		 * from any sources we can, but don't worry if it
		 * fails -- best effort.
		 */
		(void)entropy_request(ENTROPY_CAPACITY, flags);
	} else while (E->bitsneeded > 0 && E->samplesneeded > 0) {
		/* Ask for more, synchronously if possible.  */
		error = entropy_request(len, flags);
		if (error)
			break;

		/* If we got enough, we're done.  */
		if (E->bitsneeded == 0 || E->samplesneeded == 0) {
			KASSERT(error == 0);
			break;
		}

		/* If not waiting, stop here.  */
		if (!ISSET(flags, ENTROPY_WAIT)) {
			error = SET_ERROR(EWOULDBLOCK);
			break;
		}

		/* Wait for some entropy to come in and try again.  */
		KASSERT(!cold);
		if (!printed) {
			printf("entropy: pid %d (%s) waiting for entropy(7)\n",
			    curproc->p_pid, curproc->p_comm);
			printed = true;
		}

		if (ISSET(flags, ENTROPY_SIG)) {
			error = cv_timedwait_sig(&E->cv, &E->lock, hz);
			if (error && error != EWOULDBLOCK)
				break;
		} else {
			cv_timedwait(&E->cv, &E->lock, hz);
		}
	}

	/*
	 * Count failure -- but fill the buffer nevertheless, unless
	 * the caller specified ENTROPY_HARDFAIL.
	 */
	if (error) {
		if (ISSET(flags, ENTROPY_HARDFAIL))
			goto out;
		entropy_extract_fail_evcnt.ev_count++;
	}

	/*
	 * Report a warning if we haven't yet reached full entropy.
	 * This is the only case where we consider entropy to be
	 * `depleted' without kern.entropy.depletion enabled -- when we
	 * only have partial entropy, an adversary may be able to
	 * narrow the state of the pool down to a small number of
	 * possibilities; the output then enables them to confirm a
	 * guess, reducing its entropy from the adversary's perspective
	 * to zero.
	 *
	 * This should only happen if the operator has chosen to
	 * consolidate, either through sysctl kern.entropy.consolidate
	 * or by writing less than full entropy to /dev/random as root
	 * (which /dev/random promises will immediately affect
	 * subsequent output, for better or worse).
	 */
	if (E->bitsneeded > 0 && E->samplesneeded > 0) {
		if (__predict_false(E->epoch == (unsigned)-1) &&
		    ratecheck(&lasttime, &interval)) {
			printf("WARNING:"
			    " system needs entropy for security;"
			    " see entropy(7)\n");
		}
		atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
		atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
	}

	/* Extract data from the pool, and `deplete' if we're doing that.  */
	entpool_extract(&E->pool, buf, len);
	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
	    error == 0) {
		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
		unsigned bitsneeded = E->bitsneeded;
		unsigned samplesneeded = E->samplesneeded;

		bitsneeded += MIN(MINENTROPYBITS - bitsneeded, cost);
		samplesneeded += MIN(MINSAMPLES - samplesneeded, cost);

		atomic_store_relaxed(&E->bitsneeded, bitsneeded);
		atomic_store_relaxed(&E->samplesneeded, samplesneeded);
		entropy_deplete_evcnt.ev_count++;
	}

out:	/* Release the global lock and return the error.  */
	if (__predict_false(cold))
		splx(s);
	else
		mutex_exit(&E->lock);
	return error;
}
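
/*
 * Usage sketch (illustrative only, mirroring the usage model under
 * entropy_epoch above): a thread-context caller that insists on full
 * entropy before generating a long-term key passes
 * ENTROPY_WAIT|ENTROPY_SIG and treats an error as `interrupted, try
 * again later'; crypto_prng_reseed stands in for whatever DRBG the
 * caller actually feeds.
 *
 *	uint8_t seed[32];
 *	int error;
 *
 *	error = entropy_extract(seed, sizeof seed,
 *	    ENTROPY_WAIT|ENTROPY_SIG);
 *	if (error)
 *		return error;
 *	crypto_prng_reseed(&foo->prng, seed, sizeof seed);
 *	explicit_memset(seed, 0, sizeof seed);
 */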

/*
 * entropy_poll(events)
 *
 *	Return the subset of events ready, and if it is not all of
 *	events, record curlwp as waiting for entropy.
 */
int
entropy_poll(int events)
{
	int revents = 0;

	KASSERT(!cold);

	/* Always ready for writing.  */
	revents |= events & (POLLOUT|POLLWRNORM);

	/* Narrow it down to reads.  */
	events &= POLLIN|POLLRDNORM;
	if (events == 0)
		return revents;

	/*
	 * If we have reached full entropy and we're not depleting
	 * entropy, we are forever ready.
	 */
	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0 ||
		atomic_load_relaxed(&E->samplesneeded) == 0) &&
	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
		return revents | events;

	/*
	 * Otherwise, check whether we need entropy under the lock.  If
	 * we don't, we're ready; if we do, add ourselves to the queue.
	 */
	mutex_enter(&E->lock);
	if (E->bitsneeded == 0 || E->samplesneeded == 0)
		revents |= events;
	else
		selrecord(curlwp, &E->selq);
	mutex_exit(&E->lock);

	return revents;
}

/*
 * filt_entropy_read_detach(kn)
 *
 *	struct filterops::f_detach callback for entropy read events:
 *	remove kn from the list of waiters.
 */
static void
filt_entropy_read_detach(struct knote *kn)
{

	KASSERT(!cold);

	mutex_enter(&E->lock);
	selremove_knote(&E->selq, kn);
	mutex_exit(&E->lock);
}

/*
 * filt_entropy_read_event(kn, hint)
 *
 *	struct filterops::f_event callback for entropy read events:
 *	poll for entropy.  Caller must hold the global entropy lock if
 *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
 */
static int
filt_entropy_read_event(struct knote *kn, long hint)
{
	int ret;

	KASSERT(!cold);

	/* Acquire the lock, if caller is outside entropy subsystem.  */
	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&E->lock));
	else
		mutex_enter(&E->lock);

	/*
	 * If we still need entropy, can't read anything; if not, can
	 * read arbitrarily much.
	 */
	if (E->bitsneeded != 0 && E->samplesneeded != 0) {
		ret = 0;
	} else {
		if (atomic_load_relaxed(&entropy_depletion))
			kn->kn_data = ENTROPY_CAPACITY; /* bytes */
		else
			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
		ret = 1;
	}

	/* Release the lock, if caller is outside entropy subsystem.  */
	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&E->lock));
	else
		mutex_exit(&E->lock);

	return ret;
}

/* XXX Makes sense only for /dev/u?random.  */
*/ 1734 static const struct filterops entropy_read_filtops = { 1735 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 1736 .f_attach = NULL, 1737 .f_detach = filt_entropy_read_detach, 1738 .f_event = filt_entropy_read_event, 1739 }; 1740 1741 /* 1742 * entropy_kqfilter(kn) 1743 * 1744 * Register kn to receive entropy event notifications. May be 1745 * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. 1746 */ 1747 int 1748 entropy_kqfilter(struct knote *kn) 1749 { 1750 1751 KASSERT(!cold); 1752 1753 switch (kn->kn_filter) { 1754 case EVFILT_READ: 1755 /* Enter into the global select queue. */ 1756 mutex_enter(&E->lock); 1757 kn->kn_fop = &entropy_read_filtops; 1758 selrecord_knote(&E->selq, kn); 1759 mutex_exit(&E->lock); 1760 return 0; 1761 case EVFILT_WRITE: 1762 /* Can always dump entropy into the system. */ 1763 kn->kn_fop = &seltrue_filtops; 1764 return 0; 1765 default: 1766 return SET_ERROR(EINVAL); 1767 } 1768 } 1769 1770 /* 1771 * rndsource_setcb(rs, get, getarg) 1772 * 1773 * Set the request callback for the entropy source rs, if it can 1774 * provide entropy on demand. Must precede rnd_attach_source. 1775 */ 1776 void 1777 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *), 1778 void *getarg) 1779 { 1780 1781 rs->get = get; 1782 rs->getarg = getarg; 1783 } 1784 1785 /* 1786 * rnd_attach_source(rs, name, type, flags) 1787 * 1788 * Attach the entropy source rs. Must be done after 1789 * rndsource_setcb, if any, and before any calls to rnd_add_data. 1790 */ 1791 void 1792 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type, 1793 uint32_t flags) 1794 { 1795 uint32_t extra[4]; 1796 unsigned i = 0; 1797 1798 KASSERTMSG(name[0] != '\0', "rndsource must have nonempty name"); 1799 1800 /* Grab cycle counter to mix extra into the pool. */ 1801 extra[i++] = entropy_timer(); 1802 1803 /* 1804 * Apply some standard flags: 1805 * 1806 * - We do not bother with network devices by default, for 1807 * hysterical raisins (perhaps: because it is often the case 1808 * that an adversary can influence network packet timings). 1809 */ 1810 switch (type) { 1811 case RND_TYPE_NET: 1812 flags |= RND_FLAG_NO_COLLECT; 1813 break; 1814 } 1815 1816 /* Sanity-check the callback if RND_FLAG_HASCB is set. */ 1817 KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL); 1818 1819 /* Initialize the random source. */ 1820 memset(rs->name, 0, sizeof(rs->name)); /* paranoia */ 1821 strlcpy(rs->name, name, sizeof(rs->name)); 1822 memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 1823 memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 1824 rs->total = 0; 1825 rs->type = type; 1826 rs->flags = flags; 1827 if (entropy_percpu != NULL) 1828 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 1829 extra[i++] = entropy_timer(); 1830 1831 /* Wire it into the global list of random sources. */ 1832 if (__predict_true(!cold)) 1833 mutex_enter(&E->lock); 1834 LIST_INSERT_HEAD(&E->sources, rs, list); 1835 if (__predict_true(!cold)) 1836 mutex_exit(&E->lock); 1837 extra[i++] = entropy_timer(); 1838 1839 /* Request that it provide entropy ASAP, if we can. */ 1840 if (ISSET(flags, RND_FLAG_HASCB)) 1841 (*rs->get)(ENTROPY_CAPACITY, rs->getarg); 1842 extra[i++] = entropy_timer(); 1843 1844 /* Mix the extra into the pool. */ 1845 KASSERT(i == __arraycount(extra)); 1846 entropy_enter(extra, sizeof extra, 0, /*count*/__predict_true(!cold)); 1847 explicit_memset(extra, 0, sizeof extra); 1848 } 1849 1850 /* 1851 * rnd_detach_source(rs) 1852 * 1853 * Detach the entropy source rs. 
May sleep waiting for users to 1854 * drain. Further use is not allowed. 1855 */ 1856 void 1857 rnd_detach_source(struct krndsource *rs) 1858 { 1859 1860 /* 1861 * If we're cold (shouldn't happen, but hey), just remove it 1862 * from the list -- there's nothing allocated. 1863 */ 1864 if (__predict_false(cold) && entropy_percpu == NULL) { 1865 LIST_REMOVE(rs, list); 1866 return; 1867 } 1868 1869 /* We may have to wait for entropy_request. */ 1870 ASSERT_SLEEPABLE(); 1871 1872 /* Wait until the source list is not in use, and remove it. */ 1873 mutex_enter(&E->lock); 1874 while (E->sourcelock) 1875 cv_wait(&E->sourcelock_cv, &E->lock); 1876 LIST_REMOVE(rs, list); 1877 mutex_exit(&E->lock); 1878 1879 /* Free the per-CPU data. */ 1880 percpu_free(rs->state, sizeof(struct rndsource_cpu)); 1881 } 1882 1883 /* 1884 * rnd_lock_sources(flags) 1885 * 1886 * Lock the list of entropy sources. Caller must hold the global 1887 * entropy lock. If successful, no rndsource will go away until 1888 * rnd_unlock_sources even while the caller releases the global 1889 * entropy lock. 1890 * 1891 * May be called very early at boot, before entropy_init. 1892 * 1893 * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 1894 * If flags & ENTROPY_SIG, allow interruption by signal. 1895 */ 1896 static int __attribute__((warn_unused_result)) 1897 rnd_lock_sources(int flags) 1898 { 1899 int error; 1900 1901 KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 1902 KASSERT(!cpu_intr_p()); 1903 1904 while (E->sourcelock) { 1905 KASSERT(!cold); 1906 if (!ISSET(flags, ENTROPY_WAIT)) 1907 return SET_ERROR(EWOULDBLOCK); 1908 if (ISSET(flags, ENTROPY_SIG)) { 1909 error = cv_wait_sig(&E->sourcelock_cv, &E->lock); 1910 if (error) 1911 return error; 1912 } else { 1913 cv_wait(&E->sourcelock_cv, &E->lock); 1914 } 1915 } 1916 1917 E->sourcelock = curlwp; 1918 return 0; 1919 } 1920 1921 /* 1922 * rnd_unlock_sources() 1923 * 1924 * Unlock the list of sources after rnd_lock_sources. Caller must 1925 * hold the global entropy lock. 1926 * 1927 * May be called very early at boot, before entropy_init. 1928 */ 1929 static void 1930 rnd_unlock_sources(void) 1931 { 1932 1933 KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 1934 KASSERT(!cpu_intr_p()); 1935 1936 KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p", 1937 curlwp, E->sourcelock); 1938 E->sourcelock = NULL; 1939 if (__predict_true(!cold)) 1940 cv_signal(&E->sourcelock_cv); 1941 } 1942 1943 /* 1944 * rnd_sources_locked() 1945 * 1946 * True if we hold the list of rndsources locked, for diagnostic 1947 * assertions. 1948 * 1949 * May be called very early at boot, before entropy_init. 1950 */ 1951 static bool __diagused 1952 rnd_sources_locked(void) 1953 { 1954 1955 return E->sourcelock == curlwp; 1956 } 1957 1958 /* 1959 * entropy_request(nbytes, flags) 1960 * 1961 * Request nbytes bytes of entropy from all sources in the system. 1962 * OK if we overdo it. Caller must hold the global entropy lock; 1963 * will release and re-acquire it. 1964 * 1965 * May be called very early at boot, before entropy_init. 1966 * 1967 * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 1968 * If flags & ENTROPY_SIG, allow interruption by signal. 
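 *
 *	Illustrative call (a sketch, not a prescription): a caller that
 *	holds E->lock once warm, and that can sleep if it passes
 *	ENTROPY_WAIT, might do
 *
 *		error = entropy_request(ENTROPY_CAPACITY,
 *		    ENTROPY_WAIT|ENTROPY_SIG);
 *
 *	much as entropy_extract does above with its caller's flags.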
1969 */ 1970 static int 1971 entropy_request(size_t nbytes, int flags) 1972 { 1973 struct krndsource *rs; 1974 int error; 1975 1976 KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 1977 KASSERT(!cpu_intr_p()); 1978 if ((flags & ENTROPY_WAIT) != 0 && __predict_false(!cold)) 1979 ASSERT_SLEEPABLE(); 1980 1981 /* 1982 * Lock the list of entropy sources to block rnd_detach_source 1983 * until we're done, and to serialize calls to the entropy 1984 * callbacks as guaranteed to drivers. 1985 */ 1986 error = rnd_lock_sources(flags); 1987 if (error) 1988 return error; 1989 entropy_request_evcnt.ev_count++; 1990 1991 /* Clamp to the maximum reasonable request. */ 1992 nbytes = MIN(nbytes, ENTROPY_CAPACITY); 1993 1994 /* Walk the list of sources. */ 1995 LIST_FOREACH(rs, &E->sources, list) { 1996 /* Skip sources without callbacks. */ 1997 if (!ISSET(rs->flags, RND_FLAG_HASCB)) 1998 continue; 1999 2000 /* 2001 * Skip sources that are disabled altogether -- we 2002 * would just ignore their samples anyway. 2003 */ 2004 if (ISSET(rs->flags, RND_FLAG_NO_COLLECT)) 2005 continue; 2006 2007 /* Drop the lock while we call the callback. */ 2008 if (__predict_true(!cold)) 2009 mutex_exit(&E->lock); 2010 (*rs->get)(nbytes, rs->getarg); 2011 if (__predict_true(!cold)) 2012 mutex_enter(&E->lock); 2013 } 2014 2015 /* Request done; unlock the list of entropy sources. */ 2016 rnd_unlock_sources(); 2017 return 0; 2018 } 2019 2020 static inline uint32_t 2021 rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta) 2022 { 2023 int32_t delta2, delta3; 2024 2025 /* 2026 * Calculate the second and third order differentials 2027 */ 2028 delta2 = d->dx - delta; 2029 if (delta2 < 0) 2030 delta2 = -delta2; /* XXX arithmetic overflow */ 2031 2032 delta3 = d->d2x - delta2; 2033 if (delta3 < 0) 2034 delta3 = -delta3; /* XXX arithmetic overflow */ 2035 2036 d->x = v; 2037 d->dx = delta; 2038 d->d2x = delta2; 2039 2040 /* 2041 * If any delta is 0, we got no entropy. If all are non-zero, we 2042 * might have something. 2043 */ 2044 if (delta == 0 || delta2 == 0 || delta3 == 0) 2045 return 0; 2046 2047 return 1; 2048 } 2049 2050 static inline uint32_t 2051 rnd_dt_estimate(struct krndsource *rs, uint32_t t) 2052 { 2053 int32_t delta; 2054 uint32_t ret; 2055 rnd_delta_t *d; 2056 struct rndsource_cpu *rc; 2057 2058 rc = percpu_getref(rs->state); 2059 d = &rc->rc_timedelta; 2060 2061 if (t < d->x) { 2062 delta = UINT32_MAX - d->x + t; 2063 } else { 2064 delta = d->x - t; 2065 } 2066 2067 if (delta < 0) { 2068 delta = -delta; /* XXX arithmetic overflow */ 2069 } 2070 2071 ret = rnd_delta_estimate(d, t, delta); 2072 2073 KASSERT(d->x == t); 2074 KASSERT(d->dx == delta); 2075 percpu_putref(rs->state); 2076 return ret; 2077 } 2078 2079 /* 2080 * rnd_add_uint32(rs, value) 2081 * 2082 * Enter 32 bits of data from an entropy source into the pool. 2083 * 2084 * May be called from any context or with spin locks held, but may 2085 * drop data. 2086 * 2087 * This is meant for cheaply taking samples from devices that 2088 * aren't designed to be hardware random number generators. 
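 *
 *	Hypothetical driver usage (sc and sc_rndsource are illustrative
 *	names only, not part of this API), e.g. from an interrupt
 *	handler:
 *
 *		rnd_add_uint32(&sc->sc_rndsource, isr_status);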
2089 */ 2090 void 2091 rnd_add_uint32(struct krndsource *rs, uint32_t value) 2092 { 2093 bool intr_p = true; 2094 2095 rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 2096 } 2097 2098 void 2099 _rnd_add_uint32(struct krndsource *rs, uint32_t value) 2100 { 2101 bool intr_p = true; 2102 2103 rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 2104 } 2105 2106 void 2107 _rnd_add_uint64(struct krndsource *rs, uint64_t value) 2108 { 2109 bool intr_p = true; 2110 2111 rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 2112 } 2113 2114 /* 2115 * rnd_add_data(rs, buf, len, entropybits) 2116 * 2117 * Enter data from an entropy source into the pool, with a 2118 * driver's estimate of how much entropy the physical source of 2119 * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 2120 * estimate and treat it as zero. 2121 * 2122 * rs MAY but SHOULD NOT be NULL. If rs is NULL, MUST NOT be 2123 * called from interrupt context or with spin locks held. 2124 * 2125 * If rs is non-NULL, MAY but SHOULD NOT be called from interrupt 2126 * context, in which case act like rnd_add_data_intr -- if the 2127 * sample buffer is full, schedule a softint and drop any 2128 * additional data on the floor. (This may change later once we 2129 * fix drivers that still call this from interrupt context to use 2130 * rnd_add_data_intr instead.) MUST NOT be called with spin locks 2131 * held if not in hard interrupt context -- i.e., MUST NOT be 2132 * called in thread context or softint context with spin locks 2133 * held. 2134 */ 2135 void 2136 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len, 2137 uint32_t entropybits) 2138 { 2139 bool intr_p = cpu_intr_p(); /* XXX make this unconditionally false */ 2140 2141 /* 2142 * Weird legacy exception that we should rip out and replace by 2143 * creating new rndsources to attribute entropy to the callers: 2144 * If there's no rndsource, just enter the data and time now. 2145 */ 2146 if (rs == NULL) { 2147 uint32_t extra; 2148 2149 KASSERT(!intr_p); 2150 KASSERTMSG(howmany(entropybits, NBBY) <= len, 2151 "%s: impossible entropy rate:" 2152 " %"PRIu32" bits in %"PRIu32"-byte string", 2153 rs ? rs->name : "(anonymous)", entropybits, len); 2154 entropy_enter(buf, len, entropybits, /*count*/false); 2155 extra = entropy_timer(); 2156 entropy_enter(&extra, sizeof extra, 0, /*count*/false); 2157 explicit_memset(&extra, 0, sizeof extra); 2158 return; 2159 } 2160 2161 rnd_add_data_internal(rs, buf, len, entropybits, intr_p); 2162 } 2163 2164 /* 2165 * rnd_add_data_intr(rs, buf, len, entropybits) 2166 * 2167 * Try to enter data from an entropy source into the pool, with a 2168 * driver's estimate of how much entropy the physical source of 2169 * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 2170 * estimate and treat it as zero. If the sample buffer is full, 2171 * schedule a softint and drop any additional data on the floor. 2172 */ 2173 void 2174 rnd_add_data_intr(struct krndsource *rs, const void *buf, uint32_t len, 2175 uint32_t entropybits) 2176 { 2177 bool intr_p = true; 2178 2179 rnd_add_data_internal(rs, buf, len, entropybits, intr_p); 2180 } 2181 2182 /* 2183 * rnd_add_data_internal(rs, buf, len, entropybits, intr_p) 2184 * 2185 * Internal subroutine to decide whether or not to enter data or 2186 * timing for a particular rndsource, and if so, to enter it. 
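 *
 *	Samples are entered only if entropy_collection is enabled, the
 *	source is not marked RND_FLAG_NO_COLLECT, and at least one of
 *	RND_FLAG_COLLECT_VALUE/RND_FLAG_COLLECT_TIME is set; the
 *	driver's entropy estimate is treated as zero under
 *	RND_FLAG_NO_ESTIMATE.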
2187 * 2188 * intr_p is true for callers from interrupt context or spin locks 2189 * held, and false for callers from thread or soft interrupt 2190 * context and no spin locks held. 2191 */ 2192 static void 2193 rnd_add_data_internal(struct krndsource *rs, const void *buf, uint32_t len, 2194 uint32_t entropybits, bool intr_p) 2195 { 2196 uint32_t flags; 2197 2198 KASSERTMSG(howmany(entropybits, NBBY) <= len, 2199 "%s: impossible entropy rate:" 2200 " %"PRIu32" bits in %"PRIu32"-byte string", 2201 rs ? rs->name : "(anonymous)", entropybits, len); 2202 2203 /* 2204 * Hold up the reset xcall before it zeroes the entropy counts 2205 * on this CPU or globally. Otherwise, we might leave some 2206 * nonzero entropy attributed to an untrusted source in the 2207 * event of a race with a change to flags. 2208 */ 2209 kpreempt_disable(); 2210 2211 /* Load a snapshot of the flags. Ioctl may change them under us. */ 2212 flags = atomic_load_relaxed(&rs->flags); 2213 2214 /* 2215 * Skip if: 2216 * - we're not collecting entropy, or 2217 * - the operator doesn't want to collect entropy from this, or 2218 * - neither data nor timings are being collected from this. 2219 */ 2220 if (!atomic_load_relaxed(&entropy_collection) || 2221 ISSET(flags, RND_FLAG_NO_COLLECT) || 2222 !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME)) 2223 goto out; 2224 2225 /* If asked, ignore the estimate. */ 2226 if (ISSET(flags, RND_FLAG_NO_ESTIMATE)) 2227 entropybits = 0; 2228 2229 /* If we are collecting data, enter them. */ 2230 if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) { 2231 rnd_add_data_1(rs, buf, len, entropybits, /*count*/false, 2232 RND_FLAG_COLLECT_VALUE, intr_p); 2233 } 2234 2235 /* If we are collecting timings, enter one. */ 2236 if (ISSET(flags, RND_FLAG_COLLECT_TIME)) { 2237 uint32_t extra; 2238 bool count; 2239 2240 /* Sample a timer. */ 2241 extra = entropy_timer(); 2242 2243 /* If asked, do entropy estimation on the time. */ 2244 if ((flags & (RND_FLAG_ESTIMATE_TIME|RND_FLAG_NO_ESTIMATE)) == 2245 RND_FLAG_ESTIMATE_TIME && __predict_true(!cold)) 2246 count = rnd_dt_estimate(rs, extra); 2247 else 2248 count = false; 2249 2250 rnd_add_data_1(rs, &extra, sizeof extra, 0, count, 2251 RND_FLAG_COLLECT_TIME, intr_p); 2252 } 2253 2254 out: /* Allow concurrent changes to flags to finish. */ 2255 kpreempt_enable(); 2256 } 2257 2258 static unsigned 2259 add_sat(unsigned a, unsigned b) 2260 { 2261 unsigned c = a + b; 2262 2263 return (c < a ? UINT_MAX : c); 2264 } 2265 2266 /* 2267 * rnd_add_data_1(rs, buf, len, entropybits, count, flag) 2268 * 2269 * Internal subroutine to call either entropy_enter_intr, if we're 2270 * in interrupt context, or entropy_enter if not, and to count the 2271 * entropy in an rndsource. 2272 */ 2273 static void 2274 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len, 2275 uint32_t entropybits, bool count, uint32_t flag, bool intr_p) 2276 { 2277 bool fullyused; 2278 2279 /* 2280 * For the interrupt-like path, use entropy_enter_intr and take 2281 * note of whether it consumed the full sample; otherwise, use 2282 * entropy_enter, which always consumes the full sample. 2283 */ 2284 if (intr_p) { 2285 fullyused = entropy_enter_intr(buf, len, entropybits, count); 2286 } else { 2287 entropy_enter(buf, len, entropybits, count); 2288 fullyused = true; 2289 } 2290 2291 /* 2292 * If we used the full sample, note how many bits were 2293 * contributed from this source. 
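 * While cold there is no per-CPU state yet, so the tallies go straight
 * into the rndsource at splhigh; once warm they go into per-CPU
 * counters, so no cross-CPU synchronization is needed here.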
2294 */ 2295 if (fullyused) { 2296 if (__predict_false(cold)) { 2297 const int s = splhigh(); 2298 rs->total = add_sat(rs->total, entropybits); 2299 switch (flag) { 2300 case RND_FLAG_COLLECT_TIME: 2301 rs->time_delta.insamples = 2302 add_sat(rs->time_delta.insamples, 1); 2303 break; 2304 case RND_FLAG_COLLECT_VALUE: 2305 rs->value_delta.insamples = 2306 add_sat(rs->value_delta.insamples, 1); 2307 break; 2308 } 2309 splx(s); 2310 } else { 2311 struct rndsource_cpu *rc = percpu_getref(rs->state); 2312 2313 atomic_store_relaxed(&rc->rc_entropybits, 2314 add_sat(rc->rc_entropybits, entropybits)); 2315 switch (flag) { 2316 case RND_FLAG_COLLECT_TIME: 2317 atomic_store_relaxed(&rc->rc_timesamples, 2318 add_sat(rc->rc_timesamples, 1)); 2319 break; 2320 case RND_FLAG_COLLECT_VALUE: 2321 atomic_store_relaxed(&rc->rc_datasamples, 2322 add_sat(rc->rc_datasamples, 1)); 2323 break; 2324 } 2325 percpu_putref(rs->state); 2326 } 2327 } 2328 } 2329 2330 /* 2331 * rnd_add_data_sync(rs, buf, len, entropybits) 2332 * 2333 * Same as rnd_add_data. Originally used in rndsource callbacks, 2334 * to break an unnecessary cycle; no longer really needed. 2335 */ 2336 void 2337 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len, 2338 uint32_t entropybits) 2339 { 2340 2341 rnd_add_data(rs, buf, len, entropybits); 2342 } 2343 2344 /* 2345 * rndsource_entropybits(rs) 2346 * 2347 * Return approximately the number of bits of entropy that have 2348 * been contributed via rs so far. Approximate if other CPUs may 2349 * be calling rnd_add_data concurrently. 2350 */ 2351 static unsigned 2352 rndsource_entropybits(struct krndsource *rs) 2353 { 2354 unsigned nbits = rs->total; 2355 2356 KASSERT(!cold); 2357 KASSERT(rnd_sources_locked()); 2358 percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits); 2359 return nbits; 2360 } 2361 2362 static void 2363 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci) 2364 { 2365 struct rndsource_cpu *rc = ptr; 2366 unsigned *nbitsp = cookie; 2367 unsigned cpu_nbits; 2368 2369 cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits); 2370 *nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits); 2371 } 2372 2373 /* 2374 * rndsource_to_user(rs, urs) 2375 * 2376 * Copy a description of rs out to urs for userland. 2377 */ 2378 static void 2379 rndsource_to_user(struct krndsource *rs, rndsource_t *urs) 2380 { 2381 2382 KASSERT(!cold); 2383 KASSERT(rnd_sources_locked()); 2384 2385 /* Avoid kernel memory disclosure. */ 2386 memset(urs, 0, sizeof(*urs)); 2387 2388 CTASSERT(sizeof(urs->name) == sizeof(rs->name)); 2389 strlcpy(urs->name, rs->name, sizeof(urs->name)); 2390 urs->total = rndsource_entropybits(rs); 2391 urs->type = rs->type; 2392 urs->flags = atomic_load_relaxed(&rs->flags); 2393 } 2394 2395 /* 2396 * rndsource_to_user_est(rs, urse) 2397 * 2398 * Copy a description of rs and estimation statistics out to urse 2399 * for userland. 2400 */ 2401 static void 2402 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse) 2403 { 2404 2405 KASSERT(!cold); 2406 KASSERT(rnd_sources_locked()); 2407 2408 /* Avoid kernel memory disclosure. */ 2409 memset(urse, 0, sizeof(*urse)); 2410 2411 /* Copy out the rndsource description. */ 2412 rndsource_to_user(rs, &urse->rt); 2413 2414 /* Gather the statistics. 
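 * Samples counted while cold live in rs itself; the per-CPU tallies
 * are folded in by rndsource_to_user_est_cpu below.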
*/ 2415 urse->dt_samples = rs->time_delta.insamples; 2416 urse->dt_total = 0; 2417 urse->dv_samples = rs->value_delta.insamples; 2418 urse->dv_total = urse->rt.total; 2419 percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse); 2420 } 2421 2422 static void 2423 rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci) 2424 { 2425 struct rndsource_cpu *rc = ptr; 2426 rndsource_est_t *urse = cookie; 2427 2428 urse->dt_samples = add_sat(urse->dt_samples, 2429 atomic_load_relaxed(&rc->rc_timesamples)); 2430 urse->dv_samples = add_sat(urse->dv_samples, 2431 atomic_load_relaxed(&rc->rc_datasamples)); 2432 } 2433 2434 /* 2435 * entropy_reset_xc(arg1, arg2) 2436 * 2437 * Reset the current CPU's pending entropy to zero. 2438 */ 2439 static void 2440 entropy_reset_xc(void *arg1 __unused, void *arg2 __unused) 2441 { 2442 uint32_t extra = entropy_timer(); 2443 struct entropy_cpu_lock lock; 2444 struct entropy_cpu *ec; 2445 2446 /* 2447 * With the per-CPU state locked, zero the pending count and 2448 * enter a cycle count for fun. 2449 */ 2450 ec = entropy_cpu_get(&lock); 2451 ec->ec_bitspending = 0; 2452 ec->ec_samplespending = 0; 2453 entpool_enter(ec->ec_pool, &extra, sizeof extra); 2454 entropy_cpu_put(&lock, ec); 2455 } 2456 2457 /* 2458 * entropy_reset() 2459 * 2460 * Assume the entropy pool has been exposed, e.g. because the VM 2461 * has been cloned. Nix all the pending entropy and set the 2462 * needed to maximum. 2463 */ 2464 void 2465 entropy_reset(void) 2466 { 2467 2468 xc_broadcast(0, &entropy_reset_xc, NULL, NULL); 2469 mutex_enter(&E->lock); 2470 E->bitspending = 0; 2471 E->samplespending = 0; 2472 atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS); 2473 atomic_store_relaxed(&E->samplesneeded, MINSAMPLES); 2474 E->consolidate = false; 2475 mutex_exit(&E->lock); 2476 } 2477 2478 /* 2479 * entropy_ioctl(cmd, data) 2480 * 2481 * Handle various /dev/random ioctl queries. 2482 */ 2483 int 2484 entropy_ioctl(unsigned long cmd, void *data) 2485 { 2486 struct krndsource *rs; 2487 bool privileged; 2488 int error; 2489 2490 KASSERT(!cold); 2491 2492 /* Verify user's authorization to perform the ioctl. */ 2493 switch (cmd) { 2494 case RNDGETENTCNT: 2495 case RNDGETPOOLSTAT: 2496 case RNDGETSRCNUM: 2497 case RNDGETSRCNAME: 2498 case RNDGETESTNUM: 2499 case RNDGETESTNAME: 2500 error = kauth_authorize_device(kauth_cred_get(), 2501 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); 2502 break; 2503 case RNDCTL: 2504 error = kauth_authorize_device(kauth_cred_get(), 2505 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); 2506 break; 2507 case RNDADDDATA: 2508 error = kauth_authorize_device(kauth_cred_get(), 2509 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); 2510 /* Ascertain whether the user's inputs should be counted. */ 2511 if (kauth_authorize_device(kauth_cred_get(), 2512 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, 2513 NULL, NULL, NULL, NULL) == 0) 2514 privileged = true; 2515 break; 2516 default: { 2517 /* 2518 * XXX Hack to avoid changing module ABI so this can be 2519 * pulled up. Later, we can just remove the argument. 
2520 */ 2521 static const struct fileops fops = { 2522 .fo_ioctl = rnd_system_ioctl, 2523 }; 2524 struct file f = { 2525 .f_ops = &fops, 2526 }; 2527 MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data), 2528 enosys(), error); 2529 #if defined(_LP64) 2530 if (error == ENOSYS) 2531 MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data), 2532 enosys(), error); 2533 #endif 2534 if (error == ENOSYS) 2535 error = SET_ERROR(ENOTTY); 2536 break; 2537 } 2538 } 2539 2540 /* If anything went wrong with authorization, stop here. */ 2541 if (error) 2542 return error; 2543 2544 /* Dispatch on the command. */ 2545 switch (cmd) { 2546 case RNDGETENTCNT: { /* Get current entropy count in bits. */ 2547 uint32_t *countp = data; 2548 2549 mutex_enter(&E->lock); 2550 *countp = MINENTROPYBITS - E->bitsneeded; 2551 mutex_exit(&E->lock); 2552 2553 break; 2554 } 2555 case RNDGETPOOLSTAT: { /* Get entropy pool statistics. */ 2556 rndpoolstat_t *pstat = data; 2557 2558 mutex_enter(&E->lock); 2559 2560 /* parameters */ 2561 pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */ 2562 pstat->threshold = MINENTROPYBITS/NBBY; /* bytes */ 2563 pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */ 2564 2565 /* state */ 2566 pstat->added = 0; /* XXX total entropy_enter count */ 2567 pstat->curentropy = MINENTROPYBITS - E->bitsneeded; /* bits */ 2568 pstat->removed = 0; /* XXX total entropy_extract count */ 2569 pstat->discarded = 0; /* XXX bits of entropy beyond capacity */ 2570 2571 /* 2572 * This used to be bits of data fabricated in some 2573 * sense; we'll take it to mean number of samples, 2574 * excluding the bits of entropy from HWRNG or seed. 2575 */ 2576 pstat->generated = MINSAMPLES - E->samplesneeded; 2577 pstat->generated -= MIN(pstat->generated, pstat->curentropy); 2578 2579 mutex_exit(&E->lock); 2580 break; 2581 } 2582 case RNDGETSRCNUM: { /* Get entropy sources by number. */ 2583 rndstat_t *stat = data; 2584 uint32_t start = 0, i = 0; 2585 2586 /* Skip if none requested; fail if too many requested. */ 2587 if (stat->count == 0) 2588 break; 2589 if (stat->count > RND_MAXSTATCOUNT) 2590 return SET_ERROR(EINVAL); 2591 2592 /* 2593 * Under the lock, find the first one, copy out as many 2594 * as requested, and report how many we copied out. 2595 */ 2596 mutex_enter(&E->lock); 2597 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2598 if (error) { 2599 mutex_exit(&E->lock); 2600 return error; 2601 } 2602 LIST_FOREACH(rs, &E->sources, list) { 2603 if (start++ == stat->start) 2604 break; 2605 } 2606 while (i < stat->count && rs != NULL) { 2607 mutex_exit(&E->lock); 2608 rndsource_to_user(rs, &stat->source[i++]); 2609 mutex_enter(&E->lock); 2610 rs = LIST_NEXT(rs, list); 2611 } 2612 KASSERT(i <= stat->count); 2613 stat->count = i; 2614 rnd_unlock_sources(); 2615 mutex_exit(&E->lock); 2616 break; 2617 } 2618 case RNDGETESTNUM: { /* Get sources and estimates by number. */ 2619 rndstat_est_t *estat = data; 2620 uint32_t start = 0, i = 0; 2621 2622 /* Skip if none requested; fail if too many requested. */ 2623 if (estat->count == 0) 2624 break; 2625 if (estat->count > RND_MAXSTATCOUNT) 2626 return SET_ERROR(EINVAL); 2627 2628 /* 2629 * Under the lock, find the first one, copy out as many 2630 * as requested, and report how many we copied out. 
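 * rnd_lock_sources keeps the list stable even though we drop E->lock
 * around each rndsource_to_user call.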
2631 */ 2632 mutex_enter(&E->lock); 2633 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2634 if (error) { 2635 mutex_exit(&E->lock); 2636 return error; 2637 } 2638 LIST_FOREACH(rs, &E->sources, list) { 2639 if (start++ == estat->start) 2640 break; 2641 } 2642 while (i < estat->count && rs != NULL) { 2643 mutex_exit(&E->lock); 2644 rndsource_to_user_est(rs, &estat->source[i++]); 2645 mutex_enter(&E->lock); 2646 rs = LIST_NEXT(rs, list); 2647 } 2648 KASSERT(i <= estat->count); 2649 estat->count = i; 2650 rnd_unlock_sources(); 2651 mutex_exit(&E->lock); 2652 break; 2653 } 2654 case RNDGETSRCNAME: { /* Get entropy sources by name. */ 2655 rndstat_name_t *nstat = data; 2656 const size_t n = sizeof(rs->name); 2657 2658 CTASSERT(sizeof(rs->name) == sizeof(nstat->name)); 2659 2660 /* 2661 * Under the lock, search by name. If found, copy it 2662 * out; if not found, fail with ENOENT. 2663 */ 2664 mutex_enter(&E->lock); 2665 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2666 if (error) { 2667 mutex_exit(&E->lock); 2668 return error; 2669 } 2670 LIST_FOREACH(rs, &E->sources, list) { 2671 if (strncmp(rs->name, nstat->name, n) == 0) 2672 break; 2673 } 2674 if (rs != NULL) { 2675 mutex_exit(&E->lock); 2676 rndsource_to_user(rs, &nstat->source); 2677 mutex_enter(&E->lock); 2678 } else { 2679 error = SET_ERROR(ENOENT); 2680 } 2681 rnd_unlock_sources(); 2682 mutex_exit(&E->lock); 2683 break; 2684 } 2685 case RNDGETESTNAME: { /* Get sources and estimates by name. */ 2686 rndstat_est_name_t *enstat = data; 2687 const size_t n = sizeof(rs->name); 2688 2689 CTASSERT(sizeof(rs->name) == sizeof(enstat->name)); 2690 2691 /* 2692 * Under the lock, search by name. If found, copy it 2693 * out; if not found, fail with ENOENT. 2694 */ 2695 mutex_enter(&E->lock); 2696 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2697 if (error) { 2698 mutex_exit(&E->lock); 2699 return error; 2700 } 2701 LIST_FOREACH(rs, &E->sources, list) { 2702 if (strncmp(rs->name, enstat->name, n) == 0) 2703 break; 2704 } 2705 if (rs != NULL) { 2706 mutex_exit(&E->lock); 2707 rndsource_to_user_est(rs, &enstat->source); 2708 mutex_enter(&E->lock); 2709 } else { 2710 error = SET_ERROR(ENOENT); 2711 } 2712 rnd_unlock_sources(); 2713 mutex_exit(&E->lock); 2714 break; 2715 } 2716 case RNDCTL: { /* Modify entropy source flags. */ 2717 rndctl_t *rndctl = data; 2718 const size_t n = sizeof(rs->name); 2719 uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT; 2720 uint32_t flags; 2721 bool reset = false, request = false; 2722 2723 CTASSERT(sizeof(rs->name) == sizeof(rndctl->name)); 2724 2725 /* Whitelist the flags that user can change. */ 2726 rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT; 2727 2728 /* 2729 * For each matching rndsource, either by type if 2730 * specified or by name if not, set the masked flags. 
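 * Newly disabling collection or estimation forces entropy_reset
 * below; any change to those flags also triggers a fresh
 * entropy_gather.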
2731 */ 2732 mutex_enter(&E->lock); 2733 LIST_FOREACH(rs, &E->sources, list) { 2734 if (rndctl->type != 0xff) { 2735 if (rs->type != rndctl->type) 2736 continue; 2737 } else if (rndctl->name[0] != '\0') { 2738 if (strncmp(rs->name, rndctl->name, n) != 0) 2739 continue; 2740 } 2741 flags = rs->flags & ~rndctl->mask; 2742 flags |= rndctl->flags & rndctl->mask; 2743 if ((rs->flags & resetflags) == 0 && 2744 (flags & resetflags) != 0) 2745 reset = true; 2746 if ((rs->flags ^ flags) & resetflags) 2747 request = true; 2748 atomic_store_relaxed(&rs->flags, flags); 2749 } 2750 mutex_exit(&E->lock); 2751 2752 /* 2753 * If we disabled estimation or collection, nix all the 2754 * pending entropy and set needed to the maximum. 2755 */ 2756 if (reset) 2757 entropy_reset(); 2758 2759 /* 2760 * If we changed any of the estimation or collection 2761 * flags, request new samples from everyone -- either 2762 * to make up for what we just lost, or to get new 2763 * samples from what we just added. 2764 * 2765 * Failing on signal, while waiting for another process 2766 * to finish requesting entropy, is OK here even though 2767 * we have committed side effects, because this ioctl 2768 * command is idempotent, so repeating it is safe. 2769 */ 2770 if (request) 2771 error = entropy_gather(); 2772 break; 2773 } 2774 case RNDADDDATA: { /* Enter seed into entropy pool. */ 2775 rnddata_t *rdata = data; 2776 unsigned entropybits = 0; 2777 2778 if (!atomic_load_relaxed(&entropy_collection)) 2779 break; /* thanks but no thanks */ 2780 if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY)) 2781 return SET_ERROR(EINVAL); 2782 2783 /* 2784 * This ioctl serves as the userland alternative a 2785 * bootloader-provided seed -- typically furnished by 2786 * /etc/rc.d/random_seed. We accept the user's entropy 2787 * claim only if 2788 * 2789 * (a) the user is privileged, and 2790 * (b) we have not entered a bootloader seed. 2791 * 2792 * under the assumption that the user may use this to 2793 * load a seed from disk that we have already loaded 2794 * from the bootloader, so we don't double-count it. 2795 */ 2796 if (privileged && rdata->entropy && rdata->len) { 2797 mutex_enter(&E->lock); 2798 if (!E->seeded) { 2799 entropybits = MIN(rdata->entropy, 2800 MIN(rdata->len, ENTROPY_CAPACITY)*NBBY); 2801 E->seeded = true; 2802 } 2803 mutex_exit(&E->lock); 2804 } 2805 2806 /* Enter the data and consolidate entropy. */ 2807 rnd_add_data(&seed_rndsource, rdata->data, rdata->len, 2808 entropybits); 2809 error = entropy_consolidate(); 2810 break; 2811 } 2812 default: 2813 error = SET_ERROR(ENOTTY); 2814 } 2815 2816 /* Return any error that may have come up. */ 2817 return error; 2818 } 2819 2820 /* Legacy entry points */ 2821 2822 void 2823 rnd_seed(void *seed, size_t len) 2824 { 2825 2826 if (len != sizeof(rndsave_t)) { 2827 printf("entropy: invalid seed length: %zu," 2828 " expected sizeof(rndsave_t) = %zu\n", 2829 len, sizeof(rndsave_t)); 2830 return; 2831 } 2832 entropy_seed(seed); 2833 } 2834 2835 void 2836 rnd_init(void) 2837 { 2838 2839 entropy_init(); 2840 } 2841 2842 void 2843 rnd_init_softint(void) 2844 { 2845 2846 entropy_init_late(); 2847 entropy_bootrequest(); 2848 } 2849 2850 int 2851 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data) 2852 { 2853 2854 return entropy_ioctl(cmd, data); 2855 } 2856
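/*
 * Illustrative driver-side use of the rndsource API above.  This is a
 * sketch only: mydev_softc, mydev_get, sc, v, and nbits are
 * hypothetical names, not part of this file or of any driver.
 *
 *	static void
 *	mydev_get(size_t nbytes, void *cookie)
 *	{
 *		struct mydev_softc *sc = cookie;
 *
 *		... arrange for about nbytes bytes to be fed back via
 *		rnd_add_data_intr or rnd_add_data ...
 *	}
 *
 *	Attach: set the callback, if any, before attaching the source.
 *
 *		rndsource_setcb(&sc->sc_rndsource, mydev_get, sc);
 *		rnd_attach_source(&sc->sc_rndsource, device_xname(self),
 *		    RND_TYPE_RNG, RND_FLAG_DEFAULT|RND_FLAG_HASCB);
 *
 *	Interrupt time: enter samples cheaply; data may be dropped on
 *	the floor if the per-CPU sample buffer is full.
 *
 *		rnd_add_data_intr(&sc->sc_rndsource, &v, sizeof v, nbits);
 *
 *	Detach: may sleep waiting for a concurrent entropy_request.
 *
 *		rnd_detach_source(&sc->sc_rndsource);
 */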