1 /* $NetBSD: kern_entropy.c,v 1.29 2021/01/21 17:33:55 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2019 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Entropy subsystem
34 *
35 * * Each CPU maintains a per-CPU entropy pool so that gathering
36 * entropy requires no interprocessor synchronization, except
37 * early at boot when we may be scrambling to gather entropy as
38 * soon as possible.
39 *
40 * - entropy_enter gathers entropy and never drops it on the
41 * floor, at the cost of sometimes having to do cryptography.
42 *
43 * - entropy_enter_intr gathers entropy or drops it on the
44 * floor, with low latency. Work to stir the pool or kick the
45 * housekeeping thread is scheduled in soft interrupts.
46 *
47 * * entropy_enter immediately enters into the global pool if it
48 * can transition to full entropy in one swell foop. Otherwise,
49 * it defers to a housekeeping thread that consolidates entropy,
50 * but only when the CPUs collectively have full entropy, in
51 * order to mitigate iterative-guessing attacks.
52 *
53 * * The entropy housekeeping thread continues to consolidate
54 * entropy even after we think we have full entropy, in case we
55 * are wrong, but is limited to one discretionary consolidation
56 * per minute, and only when new entropy is actually coming in,
57 * to limit performance impact.
58 *
59 * * The entropy epoch is the number that changes when we
60 * transition from partial entropy to full entropy, so that
61 * users can easily determine when to reseed. This also
62 * facilitates an operator explicitly causing everything to
63 * reseed by sysctl -w kern.entropy.consolidate=1.
64 *
65 * * No entropy estimation based on the sample values, which is a
66 * contradiction in terms and a potential source of side
67 * channels. It is the responsibility of the driver author to
68 * study how predictable the physical source of input can ever
69 * be, and to furnish a lower bound on the amount of entropy it
70 * has (see the driver-side sketch following this comment).
71 *
72 * * Entropy depletion is available for testing (or if you're into
73 * that sort of thing), with sysctl -w kern.entropy.depletion=1;
74 * the logic to support it is small, to minimize chance of bugs.
75 */
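
/*
 * Driver-side sketch (illustrative only: the foo_softc layout and
 * names are hypothetical, but rnd_attach_source, rnd_add_uint32, and
 * rnd_detach_source are the interfaces defined later in this file):
 *
 *	struct foo_softc {
 *		struct krndsource	sc_rndsource;
 *	} *sc;
 *
 *	At attach time:
 *		rnd_attach_source(&sc->sc_rndsource, "foo0",
 *		    RND_TYPE_UNKNOWN,
 *		    RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME);
 *
 *	In the interrupt handler, claiming zero bits of entropy
 *	(rnd_add_data is the way to claim a nonzero lower bound once
 *	the source has been studied):
 *		rnd_add_uint32(&sc->sc_rndsource, reg_value);
 *
 *	At detach time:
 *		rnd_detach_source(&sc->sc_rndsource);
 */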
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.29 2021/01/21 17:33:55 riastradh Exp $");
79
80 #include <sys/param.h>
81 #include <sys/types.h>
82 #include <sys/atomic.h>
83 #include <sys/compat_stub.h>
84 #include <sys/condvar.h>
85 #include <sys/cpu.h>
86 #include <sys/entropy.h>
87 #include <sys/errno.h>
88 #include <sys/evcnt.h>
89 #include <sys/event.h>
90 #include <sys/file.h>
91 #include <sys/intr.h>
92 #include <sys/kauth.h>
93 #include <sys/kernel.h>
94 #include <sys/kmem.h>
95 #include <sys/kthread.h>
96 #include <sys/module_hook.h>
97 #include <sys/mutex.h>
98 #include <sys/percpu.h>
99 #include <sys/poll.h>
100 #include <sys/queue.h>
101 #include <sys/rnd.h> /* legacy kernel API */
102 #include <sys/rndio.h> /* userland ioctl interface */
103 #include <sys/rndsource.h> /* kernel rndsource driver API */
104 #include <sys/select.h>
105 #include <sys/selinfo.h>
106 #include <sys/sha1.h> /* for boot seed checksum */
107 #include <sys/stdint.h>
108 #include <sys/sysctl.h>
109 #include <sys/syslog.h>
110 #include <sys/systm.h>
111 #include <sys/time.h>
112 #include <sys/xcall.h>
113
114 #include <lib/libkern/entpool.h>
115
116 #include <machine/limits.h>
117
118 #ifdef __HAVE_CPU_COUNTER
119 #include <machine/cpu_counter.h>
120 #endif
121
122 /*
123 * struct entropy_cpu
124 *
125 * Per-CPU entropy state. The pool is allocated separately
126 * because percpu(9) sometimes moves per-CPU objects around
127 * without zeroing them, which would lead to unwanted copies of
128 * sensitive secrets. The evcnt is allocated separately because
129 * evcnt(9) assumes it stays put in memory.
130 */
131 struct entropy_cpu {
132 struct evcnt *ec_softint_evcnt;
133 struct entpool *ec_pool;
134 unsigned ec_pending;
135 bool ec_locked;
136 };
137
138 /*
139 * struct rndsource_cpu
140 *
141 * Per-CPU rndsource state.
142 */
143 struct rndsource_cpu {
144 unsigned rc_entropybits;
145 unsigned rc_timesamples;
146 unsigned rc_datasamples;
147 };
148
149 /*
150 * entropy_global (a.k.a. E for short in this file)
151 *
152 * Global entropy state. Writes protected by the global lock.
153 * Some fields, marked (A), can be read outside the lock, and are
154 * maintained with atomic_load/store_relaxed.
155 */
156 struct {
157 kmutex_t lock; /* covers all global state */
158 struct entpool pool; /* global pool for extraction */
159 unsigned needed; /* (A) needed globally */
160 unsigned pending; /* (A) pending in per-CPU pools */
161 unsigned timestamp; /* (A) time of last consolidation */
162 unsigned epoch; /* (A) changes when needed -> 0 */
163 kcondvar_t cv; /* notifies state changes */
164 struct selinfo selq; /* notifies needed -> 0 */
165 struct lwp *sourcelock; /* lock on list of sources */
166 kcondvar_t sourcelock_cv; /* notifies sourcelock release */
167 LIST_HEAD(,krndsource) sources; /* list of entropy sources */
168 enum entropy_stage {
169 ENTROPY_COLD = 0, /* single-threaded */
170 ENTROPY_WARM, /* multi-threaded at boot before CPUs */
171 ENTROPY_HOT, /* multi-threaded multi-CPU */
172 } stage;
173 bool consolidate; /* kick thread to consolidate */
174 bool seed_rndsource; /* true if seed source is attached */
175 bool seeded; /* true if seed file already loaded */
176 } entropy_global __cacheline_aligned = {
177 /* Fields that must be initialized when the kernel is loaded. */
178 .needed = ENTROPY_CAPACITY*NBBY,
179 .epoch = (unsigned)-1, /* -1 means entropy never consolidated */
180 .sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
181 .stage = ENTROPY_COLD,
182 };
183
184 #define E (&entropy_global) /* declutter */
185
186 /* Read-mostly globals */
187 static struct percpu *entropy_percpu __read_mostly; /* struct entropy_cpu */
188 static void *entropy_sih __read_mostly; /* softint handler */
189 static struct lwp *entropy_lwp __read_mostly; /* housekeeping thread */
190
191 int rnd_initial_entropy __read_mostly; /* XXX legacy */
192
193 static struct krndsource seed_rndsource __read_mostly;
194
195 /*
196 * Event counters
197 *
198 * Must be careful with adding these because they can serve as
199 * side channels.
200 */
201 static struct evcnt entropy_discretionary_evcnt =
202 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
203 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
204 static struct evcnt entropy_immediate_evcnt =
205 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
206 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
207 static struct evcnt entropy_partial_evcnt =
208 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
209 EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
210 static struct evcnt entropy_consolidate_evcnt =
211 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
212 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
213 static struct evcnt entropy_extract_intr_evcnt =
214 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr");
215 EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt);
216 static struct evcnt entropy_extract_fail_evcnt =
217 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
218 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
219 static struct evcnt entropy_request_evcnt =
220 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
221 EVCNT_ATTACH_STATIC(entropy_request_evcnt);
222 static struct evcnt entropy_deplete_evcnt =
223 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
224 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
225 static struct evcnt entropy_notify_evcnt =
226 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
227 EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
228
229 /* Sysctl knobs */
230 static bool entropy_collection = 1;
231 static bool entropy_depletion = 0; /* Silly! */
232
233 static const struct sysctlnode *entropy_sysctlroot;
234 static struct sysctllog *entropy_sysctllog;
235
236 /* Forward declarations */
237 static void entropy_init_cpu(void *, void *, struct cpu_info *);
238 static void entropy_fini_cpu(void *, void *, struct cpu_info *);
239 static void entropy_account_cpu(struct entropy_cpu *);
240 static void entropy_enter(const void *, size_t, unsigned);
241 static bool entropy_enter_intr(const void *, size_t, unsigned);
242 static void entropy_softintr(void *);
243 static void entropy_thread(void *);
244 static uint32_t entropy_pending(void);
245 static void entropy_pending_cpu(void *, void *, struct cpu_info *);
246 static void entropy_do_consolidate(void);
247 static void entropy_consolidate_xc(void *, void *);
248 static void entropy_notify(void);
249 static int sysctl_entropy_consolidate(SYSCTLFN_ARGS);
250 static int sysctl_entropy_gather(SYSCTLFN_ARGS);
251 static void filt_entropy_read_detach(struct knote *);
252 static int filt_entropy_read_event(struct knote *, long);
253 static void entropy_request(size_t);
254 static void rnd_add_data_1(struct krndsource *, const void *, uint32_t,
255 uint32_t, uint32_t);
256 static unsigned rndsource_entropybits(struct krndsource *);
257 static void rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
258 static void rndsource_to_user(struct krndsource *, rndsource_t *);
259 static void rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
260 static void rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);
261
262 /*
263 * entropy_timer()
264 *
265 * Cycle counter, time counter, or anything that changes a wee bit
266 * unpredictably.
267 */
268 static inline uint32_t
269 entropy_timer(void)
270 {
271 struct bintime bt;
272 uint32_t v;
273
274 /* If we have a CPU cycle counter, use the low 32 bits. */
275 #ifdef __HAVE_CPU_COUNTER
276 if (__predict_true(cpu_hascounter()))
277 return cpu_counter32();
278 #endif /* __HAVE_CPU_COUNTER */
279
280 /* If we're cold, tough. Can't binuptime while cold. */
281 if (__predict_false(cold))
282 return 0;
283
284 /* Fold the 128 bits of binuptime into 32 bits. */
285 binuptime(&bt);
286 v = bt.frac;
287 v ^= bt.frac >> 32;
288 v ^= bt.sec;
289 v ^= bt.sec >> 32;
290 return v;
291 }
292
293 static void
294 attach_seed_rndsource(void)
295 {
296
297 /*
298 * First called no later than entropy_init, while we are still
299 * single-threaded, so no need for RUN_ONCE.
300 */
301 if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
302 return;
303 rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
304 RND_FLAG_COLLECT_VALUE);
305 E->seed_rndsource = true;
306 }
307
308 /*
309 * entropy_init()
310 *
311 * Initialize the entropy subsystem. Panic on failure.
312 *
313 * Requires percpu(9) and sysctl(9) to be initialized.
314 */
315 static void
316 entropy_init(void)
317 {
318 uint32_t extra[2];
319 struct krndsource *rs;
320 unsigned i = 0;
321
322 KASSERT(E->stage == ENTROPY_COLD);
323
324 /* Grab some cycle counts early at boot. */
325 extra[i++] = entropy_timer();
326
327 /* Run the entropy pool cryptography self-test. */
328 if (entpool_selftest() == -1)
329 panic("entropy pool crypto self-test failed");
330
331 /* Create the sysctl directory. */
332 sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
333 CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
334 SYSCTL_DESCR("Entropy (random number sources) options"),
335 NULL, 0, NULL, 0,
336 CTL_KERN, CTL_CREATE, CTL_EOL);
337
338 /* Create the sysctl knobs. */
339 /* XXX These shouldn't be writable at securelevel>0. */
340 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
341 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
342 SYSCTL_DESCR("Automatically collect entropy from hardware"),
343 NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
344 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
345 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
346 SYSCTL_DESCR("`Deplete' entropy pool when observed"),
347 NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
348 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
349 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
350 SYSCTL_DESCR("Trigger entropy consolidation now"),
351 sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
352 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
353 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
354 SYSCTL_DESCR("Trigger entropy gathering from sources now"),
355 sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
356 /* XXX These should maybe not be readable at securelevel>0. */
357 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
358 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
359 "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
360 NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
361 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
362 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
363 "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
364 NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
365 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
366 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
367 "epoch", SYSCTL_DESCR("Entropy epoch"),
368 NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
369
370 /* Initialize the global state for multithreaded operation. */
371 mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM);
372 cv_init(&E->cv, "entropy");
373 selinit(&E->selq);
374 cv_init(&E->sourcelock_cv, "entsrclock");
375
376 /* Make sure the seed source is attached. */
377 attach_seed_rndsource();
378
379 /* Note if the bootloader didn't provide a seed. */
380 if (!E->seeded)
381 aprint_debug("entropy: no seed from bootloader\n");
382
383 /* Allocate the per-CPU records for all early entropy sources. */
384 LIST_FOREACH(rs, &E->sources, list)
385 rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
386
387 /* Enter the boot cycle count to get started. */
388 extra[i++] = entropy_timer();
389 KASSERT(i == __arraycount(extra));
390 entropy_enter(extra, sizeof extra, 0);
391 explicit_memset(extra, 0, sizeof extra);
392
393 /* We are now ready for multi-threaded operation. */
394 E->stage = ENTROPY_WARM;
395 }
396
397 /*
398 * entropy_init_late()
399 *
400 * Late initialization. Panic on failure.
401 *
402 * Requires CPUs to have been detected and LWPs to have started.
403 */
404 static void
405 entropy_init_late(void)
406 {
407 int error;
408
409 KASSERT(E->stage == ENTROPY_WARM);
410
411 /* Allocate and initialize the per-CPU state. */
412 entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
413 entropy_init_cpu, entropy_fini_cpu, NULL);
414
415 /*
416 * Establish the softint at the highest softint priority level.
417 * Must happen after CPU detection.
418 */
419 entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
420 &entropy_softintr, NULL);
421 if (entropy_sih == NULL)
422 panic("unable to establish entropy softint");
423
424 /*
425 * Create the entropy housekeeping thread. Must happen after
426 * lwpinit.
427 */
428 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
429 entropy_thread, NULL, &entropy_lwp, "entbutler");
430 if (error)
431 panic("unable to create entropy housekeeping thread: %d",
432 error);
433
434 /*
435 * Wait until the per-CPU initialization has hit all CPUs
436 * before proceeding to mark the entropy system hot.
437 */
438 xc_barrier(XC_HIGHPRI);
439 E->stage = ENTROPY_HOT;
440 }
441
442 /*
443 * entropy_init_cpu(ptr, cookie, ci)
444 *
445 * percpu(9) constructor for per-CPU entropy pool.
446 */
447 static void
448 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
449 {
450 struct entropy_cpu *ec = ptr;
451
452 ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt),
453 KM_SLEEP);
454 ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
455 ec->ec_pending = 0;
456 ec->ec_locked = false;
457
458 evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
459 ci->ci_cpuname, "entropy softint");
460 }
461
462 /*
463 * entropy_fini_cpu(ptr, cookie, ci)
464 *
465 * percpu(9) destructor for per-CPU entropy pool.
466 */
467 static void
468 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
469 {
470 struct entropy_cpu *ec = ptr;
471
472 /*
473 * Zero any lingering data. Disclosure of the per-CPU pool
474 * shouldn't retroactively affect the security of any keys
475 * generated, because entpool(9) erases whatever we have just
476 * drawn out of any pool, but better safe than sorry.
477 */
478 explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
479
480 evcnt_detach(ec->ec_softint_evcnt);
481
482 kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
483 kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt));
484 }
485
486 /*
487 * entropy_seed(seed)
488 *
489 * Seed the entropy pool with seed. Meant to be called as early
490 * as possible by the bootloader; may be called before or after
491 * entropy_init. Must be called before system reaches userland.
492 * Must be called in thread or soft interrupt context, not in hard
493 * interrupt context. Must be called at most once.
494 *
495 * Overwrites the seed in place. Caller may then free the memory.
496 */
497 static void
498 entropy_seed(rndsave_t *seed)
499 {
500 SHA1_CTX ctx;
501 uint8_t digest[SHA1_DIGEST_LENGTH];
502 bool seeded;
503
504 /*
505 * Verify the checksum. If the checksum fails, take the data
506 * but ignore the entropy estimate -- the file may have been
507 * incompletely written with garbage, which is harmless to add
508 * but may not be as unpredictable as alleged.
509 */
510 SHA1Init(&ctx);
511 SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
512 SHA1Update(&ctx, seed->data, sizeof(seed->data));
513 SHA1Final(digest, &ctx);
514 CTASSERT(sizeof(seed->digest) == sizeof(digest));
515 if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
516 printf("entropy: invalid seed checksum\n");
517 seed->entropy = 0;
518 }
519 explicit_memset(&ctx, 0, sizeof ctx);
520 explicit_memset(digest, 0, sizeof digest);
521
522 /*
523 * If the entropy is insensibly large, try byte-swapping.
524 * Otherwise assume the file is corrupted and act as though it
525 * has zero entropy.
526 */
527 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
528 seed->entropy = bswap32(seed->entropy);
529 if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
530 seed->entropy = 0;
531 }
532
533 /* Make sure the seed source is attached. */
534 attach_seed_rndsource();
535
536 /* Test and set E->seeded. */
537 if (E->stage >= ENTROPY_WARM)
538 mutex_enter(&E->lock);
539 seeded = E->seeded;
540 E->seeded = (seed->entropy > 0);
541 if (E->stage >= ENTROPY_WARM)
542 mutex_exit(&E->lock);
543
544 /*
545 * If we've been seeded, we may be re-entering the same seed
546 * (e.g., bootloader vs module init, or something). No harm in
547 * entering it twice, but it contributes no additional entropy.
548 */
549 if (seeded) {
550 printf("entropy: double-seeded by bootloader\n");
551 seed->entropy = 0;
552 } else {
553 printf("entropy: entering seed from bootloader"
554 " with %u bits of entropy\n", (unsigned)seed->entropy);
555 }
556
557 /* Enter it into the pool and promptly zero it. */
558 rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
559 seed->entropy);
560 explicit_memset(seed, 0, sizeof(*seed));
561 }
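
/*
 * For reference, a producer of the seed file would compute the
 * checksum the same way the verification above checks it -- a sketch
 * only, assuming the rndsave_t layout used above and an SHA1
 * implementation with the same SHA1Init/SHA1Update/SHA1Final
 * interface:
 *
 *	rndsave_t seed;
 *	SHA1_CTX ctx;
 *
 *	seed.entropy = nbits;	(the claimed lower bound, in bits)
 *	(fill seed.data with at least nbits bits of entropy)
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, (const void *)&seed.entropy,
 *	    sizeof(seed.entropy));
 *	SHA1Update(&ctx, seed.data, sizeof(seed.data));
 *	SHA1Final(seed.digest, &ctx);
 */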
562
563 /*
564 * entropy_bootrequest()
565 *
566 * Request entropy from all sources at boot, once config is
567 * complete and interrupts are running.
568 */
569 void
570 entropy_bootrequest(void)
571 {
572
573 KASSERT(E->stage >= ENTROPY_WARM);
574
575 /*
576 * Request enough to satisfy the maximum entropy shortage.
577 * This is harmless overkill if the bootloader provided a seed.
578 */
579 mutex_enter(&E->lock);
580 entropy_request(ENTROPY_CAPACITY);
581 mutex_exit(&E->lock);
582 }
583
584 /*
585 * entropy_epoch()
586 *
587 * Returns the current entropy epoch. If this changes, you should
588 * reseed. A value of -1 means the system has not yet reached full
589 * entropy or been explicitly consolidated; the epoch never reverts
590 * to -1. Never zero, so you can always use zero as an uninitialized
591 * sentinel value meaning `reseed ASAP'.
592 *
593 * Usage model:
594 *
595 * struct foo {
596 * struct crypto_prng prng;
597 * unsigned epoch;
598 * } *foo;
599 *
600 * unsigned epoch = entropy_epoch();
601 * if (__predict_false(epoch != foo->epoch)) {
602 * uint8_t seed[32];
603 * if (entropy_extract(seed, sizeof seed, 0) != 0)
604 * warn("no entropy");
605 * crypto_prng_reseed(&foo->prng, seed, sizeof seed);
606 * foo->epoch = epoch;
607 * }
608 */
609 unsigned
610 entropy_epoch(void)
611 {
612
613 /*
614 * Unsigned int, so no need for seqlock for an atomic read, but
615 * make sure we read it afresh each time.
616 */
617 return atomic_load_relaxed(&E->epoch);
618 }
619
620 /*
621 * entropy_ready()
622 *
623 * True if the entropy pool has full entropy.
624 */
625 bool
626 entropy_ready(void)
627 {
628
629 return atomic_load_relaxed(&E->needed) == 0;
630 }
631
632 /*
633 * entropy_account_cpu(ec)
634 *
635 * Consider whether to consolidate entropy into the global pool
636 * after we just added some into the current CPU's pending pool.
637 *
638 * - If this CPU can provide enough entropy now, do so.
639 *
640 * - If this and whatever else is available on other CPUs can
641 * provide enough entropy, kick the consolidation thread.
642 *
643 * - Otherwise, do as little as possible, except maybe consolidate
644 * entropy at most once a minute.
645 *
646 * Caller must be bound to a CPU and therefore have exclusive
647 * access to ec. Will acquire and release the global lock.
648 */
649 static void
650 entropy_account_cpu(struct entropy_cpu *ec)
651 {
652 unsigned diff;
653
654 KASSERT(E->stage == ENTROPY_HOT);
655
656 /*
657 * If there's no entropy needed, and entropy has been
658 * consolidated in the last minute, do nothing.
659 */
660 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
661 __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
662 __predict_true((time_uptime - E->timestamp) <= 60))
663 return;
664
665 /* If there's nothing pending, stop here. */
666 if (ec->ec_pending == 0)
667 return;
668
669 /* Consider consolidation, under the lock. */
670 mutex_enter(&E->lock);
671 if (E->needed != 0 && E->needed <= ec->ec_pending) {
672 /*
673 * If we have not yet attained full entropy but we can
674 * now, do so. This way we disseminate entropy
675 * promptly when it becomes available early at boot;
676 * otherwise we leave it to the entropy consolidation
677 * thread, which is rate-limited to mitigate side
678 * channels and abuse.
679 */
680 uint8_t buf[ENTPOOL_CAPACITY];
681
682 /* Transfer from the local pool to the global pool. */
683 entpool_extract(ec->ec_pool, buf, sizeof buf);
684 entpool_enter(&E->pool, buf, sizeof buf);
685 atomic_store_relaxed(&ec->ec_pending, 0);
686 atomic_store_relaxed(&E->needed, 0);
687
688 /* Notify waiters that we now have full entropy. */
689 entropy_notify();
690 entropy_immediate_evcnt.ev_count++;
691 } else {
692 /* Record how much we can add to the global pool. */
693 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending);
694 E->pending += diff;
695 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff);
696
697 /*
698 * This should have made a difference unless we were
699 * already saturated.
700 */
701 KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY);
702 KASSERT(E->pending);
703
704 if (E->needed <= E->pending) {
705 /*
706 * Enough entropy between all the per-CPU
707 * pools. Wake up the housekeeping thread.
708 *
709 * If we don't need any entropy, this doesn't
710 * mean much, but it is the only time we ever
711 * gather additional entropy in case the
712 * accounting has been overly optimistic. This
713 * happens at most once a minute, so there's
714 * negligible performance cost.
715 */
716 E->consolidate = true;
717 cv_broadcast(&E->cv);
718 if (E->needed == 0)
719 entropy_discretionary_evcnt.ev_count++;
720 } else {
721 /* Can't get full entropy. Keep gathering. */
722 entropy_partial_evcnt.ev_count++;
723 }
724 }
725 mutex_exit(&E->lock);
726 }
727
728 /*
729 * entropy_enter_early(buf, len, nbits)
730 *
731 * Do entropy bookkeeping globally, before we have established
732 * per-CPU pools. Enter directly into the global pool in the hope
733 * that we enter enough before the first entropy_extract to thwart
734 * iterative-guessing attacks; entropy_extract will warn if not.
735 */
736 static void
737 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
738 {
739 bool notify = false;
740
741 if (E->stage >= ENTROPY_WARM)
742 mutex_enter(&E->lock);
743
744 /* Enter it into the pool. */
745 entpool_enter(&E->pool, buf, len);
746
747 /*
748 * Decide whether to notify reseed -- we will do so if either:
749 * (a) we transition from partial entropy to full entropy, or
750 * (b) we get a batch of full entropy all at once.
751 */
752 notify |= (E->needed && E->needed <= nbits);
753 notify |= (nbits >= ENTROPY_CAPACITY*NBBY);
754
755 /* Subtract from the needed count and notify if appropriate. */
756 E->needed -= MIN(E->needed, nbits);
757 if (notify) {
758 entropy_notify();
759 entropy_immediate_evcnt.ev_count++;
760 }
761
762 if (E->stage >= ENTROPY_WARM)
763 mutex_exit(&E->lock);
764 }
765
766 /*
767 * entropy_enter(buf, len, nbits)
768 *
769 * Enter len bytes of data from buf into the system's entropy
770 * pool, stirring as necessary when the internal buffer fills up.
771 * nbits is a lower bound on the number of bits of entropy in the
772 * process that led to this sample.
773 */
774 static void
775 entropy_enter(const void *buf, size_t len, unsigned nbits)
776 {
777 struct entropy_cpu *ec;
778 uint32_t pending;
779 int s;
780
781 KASSERTMSG(!cpu_intr_p(),
782 "use entropy_enter_intr from interrupt context");
783 KASSERTMSG(howmany(nbits, NBBY) <= len,
784 "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
785
786 /* If it's too early after boot, just use entropy_enter_early. */
787 if (__predict_false(E->stage < ENTROPY_HOT)) {
788 entropy_enter_early(buf, len, nbits);
789 return;
790 }
791
792 /*
793 * Acquire the per-CPU state, blocking soft interrupts and
794 * causing hard interrupts to drop samples on the floor.
795 */
796 ec = percpu_getref(entropy_percpu);
797 s = splsoftserial();
798 KASSERT(!ec->ec_locked);
799 ec->ec_locked = true;
800 __insn_barrier();
801
802 /* Enter into the per-CPU pool. */
803 entpool_enter(ec->ec_pool, buf, len);
804
805 /* Count up what we can add. */
806 pending = ec->ec_pending;
807 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
808 atomic_store_relaxed(&ec->ec_pending, pending);
809
810 /* Consolidate globally if appropriate based on what we added. */
811 entropy_account_cpu(ec);
812
813 /* Release the per-CPU state. */
814 KASSERT(ec->ec_locked);
815 __insn_barrier();
816 ec->ec_locked = false;
817 splx(s);
818 percpu_putref(entropy_percpu);
819 }
820
821 /*
822 * entropy_enter_intr(buf, len, nbits)
823 *
824 * Enter up to len bytes of data from buf into the system's
825 * entropy pool without stirring. nbits is a lower bound on the
826 * number of bits of entropy in the process that led to this
827 * sample. If the sample could be entered completely, assume
828 * nbits of entropy pending; otherwise assume none, since we don't
829 * know whether some parts of the sample are constant, for
830 * instance. Schedule a softint to stir the entropy pool if
831 * needed. Return true if used fully, false if truncated at all.
832 *
833 * Using this in thread context will work, but you might as well
834 * use entropy_enter in that case.
835 */
836 static bool
837 entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
838 {
839 struct entropy_cpu *ec;
840 bool fullyused = false;
841 uint32_t pending;
842
843 KASSERTMSG(howmany(nbits, NBBY) <= len,
844 "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
845
846 /* If it's too early after boot, just use entropy_enter_early. */
847 if (__predict_false(E->stage < ENTROPY_HOT)) {
848 entropy_enter_early(buf, len, nbits);
849 return true;
850 }
851
852 /*
853 * Acquire the per-CPU state. If someone is in the middle of
854 * using it, drop the sample. Otherwise, take the lock so that
855 * higher-priority interrupts will drop their samples.
856 */
857 ec = percpu_getref(entropy_percpu);
858 if (ec->ec_locked)
859 goto out0;
860 ec->ec_locked = true;
861 __insn_barrier();
862
863 /*
864 * Enter as much as we can into the per-CPU pool. If it was
865 * truncated, schedule a softint to stir the pool and stop.
866 */
867 if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
868 softint_schedule(entropy_sih);
869 goto out1;
870 }
871 fullyused = true;
872
873 /* Count up what we can contribute. */
874 pending = ec->ec_pending;
875 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
876 atomic_store_relaxed(&ec->ec_pending, pending);
877
878 /* Schedule a softint if we added anything and it matters. */
879 if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
880 atomic_load_relaxed(&entropy_depletion)) &&
881 nbits != 0)
882 softint_schedule(entropy_sih);
883
884 out1: /* Release the per-CPU state. */
885 KASSERT(ec->ec_locked);
886 __insn_barrier();
887 ec->ec_locked = false;
888 out0: percpu_putref(entropy_percpu);
889
890 return fullyused;
891 }
892
893 /*
894 * entropy_softintr(cookie)
895 *
896 * Soft interrupt handler for entering entropy. Takes care of
897 * stirring the local CPU's entropy pool if it filled up during
898 * hard interrupts, and promptly crediting entropy from the local
899 * CPU's entropy pool to the global entropy pool if needed.
900 */
901 static void
902 entropy_softintr(void *cookie)
903 {
904 struct entropy_cpu *ec;
905
906 /*
907 * Acquire the per-CPU state. Other users can lock this only
908 * while soft interrupts are blocked. Cause hard interrupts to
909 * drop samples on the floor.
910 */
911 ec = percpu_getref(entropy_percpu);
912 KASSERT(!ec->ec_locked);
913 ec->ec_locked = true;
914 __insn_barrier();
915
916 /* Count statistics. */
917 ec->ec_softint_evcnt->ev_count++;
918
919 /* Stir the pool if necessary. */
920 entpool_stir(ec->ec_pool);
921
922 /* Consolidate globally if appropriate based on what we added. */
923 entropy_account_cpu(ec);
924
925 /* Release the per-CPU state. */
926 KASSERT(ec->ec_locked);
927 __insn_barrier();
928 ec->ec_locked = false;
929 percpu_putref(entropy_percpu);
930 }
931
932 /*
933 * entropy_thread(cookie)
934 *
935 * Handle any asynchronous entropy housekeeping.
936 */
937 static void
938 entropy_thread(void *cookie)
939 {
940 bool consolidate;
941
942 for (;;) {
943 /*
944 * Wait until there's full entropy somewhere among the
945 * CPUs, as confirmed at most once per minute, or
946 * someone wants to consolidate.
947 */
948 if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
949 consolidate = true;
950 } else {
951 mutex_enter(&E->lock);
952 if (!E->consolidate)
953 cv_timedwait(&E->cv, &E->lock, 60*hz);
954 consolidate = E->consolidate;
955 E->consolidate = false;
956 mutex_exit(&E->lock);
957 }
958
959 if (consolidate) {
960 /* Do it. */
961 entropy_do_consolidate();
962
963 /* Mitigate abuse. */
964 kpause("entropy", false, hz, NULL);
965 }
966 }
967 }
968
969 /*
970 * entropy_pending()
971 *
972 * Count up the amount of entropy pending on other CPUs.
973 */
974 static uint32_t
975 entropy_pending(void)
976 {
977 uint32_t pending = 0;
978
979 percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
980 return pending;
981 }
982
983 static void
984 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
985 {
986 struct entropy_cpu *ec = ptr;
987 uint32_t *pendingp = cookie;
988 uint32_t cpu_pending;
989
990 cpu_pending = atomic_load_relaxed(&ec->ec_pending);
991 *pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
992 }
993
994 /*
995 * entropy_do_consolidate()
996 *
997 * Issue a cross-call to gather entropy on all CPUs and advance
998 * the entropy epoch.
999 */
1000 static void
1001 entropy_do_consolidate(void)
1002 {
1003 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1004 static struct timeval lasttime; /* serialized by E->lock */
1005 struct entpool pool;
1006 uint8_t buf[ENTPOOL_CAPACITY];
1007 unsigned diff;
1008 uint64_t ticket;
1009
1010 /* Gather entropy on all CPUs into a temporary pool. */
1011 memset(&pool, 0, sizeof pool);
1012 ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
1013 xc_wait(ticket);
1014
1015 /* Acquire the lock to notify waiters. */
1016 mutex_enter(&E->lock);
1017
1018 /* Count another consolidation. */
1019 entropy_consolidate_evcnt.ev_count++;
1020
1021 /* Note when we last consolidated, i.e. now. */
1022 E->timestamp = time_uptime;
1023
1024 /* Mix what we gathered into the global pool. */
1025 entpool_extract(&pool, buf, sizeof buf);
1026 entpool_enter(&E->pool, buf, sizeof buf);
1027 explicit_memset(&pool, 0, sizeof pool);
1028
1029 /* Count the entropy that was gathered. */
1030 diff = MIN(E->needed, E->pending);
1031 atomic_store_relaxed(&E->needed, E->needed - diff);
1032 E->pending -= diff;
1033 if (__predict_false(E->needed > 0)) {
1034 if (ratecheck(&lasttime, &interval))
1035 log(LOG_DEBUG, "entropy: WARNING:"
1036 " consolidating less than full entropy\n");
1037 }
1038
1039 /* Advance the epoch and notify waiters. */
1040 entropy_notify();
1041
1042 /* Release the lock. */
1043 mutex_exit(&E->lock);
1044 }
1045
1046 /*
1047 * entropy_consolidate_xc(vpool, arg2)
1048 *
1049 * Extract output from the local CPU's input pool and enter it
1050 * into a temporary pool passed as vpool.
1051 */
1052 static void
1053 entropy_consolidate_xc(void *vpool, void *arg2 __unused)
1054 {
1055 struct entpool *pool = vpool;
1056 struct entropy_cpu *ec;
1057 uint8_t buf[ENTPOOL_CAPACITY];
1058 uint32_t extra[7];
1059 unsigned i = 0;
1060 int s;
1061
1062 /* Grab CPU number and cycle counter to mix extra into the pool. */
1063 extra[i++] = cpu_number();
1064 extra[i++] = entropy_timer();
1065
1066 /*
1067 * Acquire the per-CPU state, blocking soft interrupts and
1068 * discarding entropy in hard interrupts, so that we can
1069 * extract from the per-CPU pool.
1070 */
1071 ec = percpu_getref(entropy_percpu);
1072 s = splsoftserial();
1073 KASSERT(!ec->ec_locked);
1074 ec->ec_locked = true;
1075 __insn_barrier();
1076 extra[i++] = entropy_timer();
1077
1078 /* Extract the data and count it no longer pending. */
1079 entpool_extract(ec->ec_pool, buf, sizeof buf);
1080 atomic_store_relaxed(&ec->ec_pending, 0);
1081 extra[i++] = entropy_timer();
1082
1083 /* Release the per-CPU state. */
1084 KASSERT(ec->ec_locked);
1085 __insn_barrier();
1086 ec->ec_locked = false;
1087 splx(s);
1088 percpu_putref(entropy_percpu);
1089 extra[i++] = entropy_timer();
1090
1091 /*
1092 * Copy over statistics, and enter the per-CPU extract and the
1093 * extra timing into the temporary pool, under the global lock.
1094 */
1095 mutex_enter(&E->lock);
1096 extra[i++] = entropy_timer();
1097 entpool_enter(pool, buf, sizeof buf);
1098 explicit_memset(buf, 0, sizeof buf);
1099 extra[i++] = entropy_timer();
1100 KASSERT(i == __arraycount(extra));
1101 entpool_enter(pool, extra, sizeof extra);
1102 explicit_memset(extra, 0, sizeof extra);
1103 mutex_exit(&E->lock);
1104 }
1105
1106 /*
1107 * entropy_notify()
1108 *
1109 * Caller just contributed entropy to the global pool. Advance
1110 * the entropy epoch and notify waiters.
1111 *
1112 * Caller must hold the global entropy lock. Except for the
1113 * `sysctl -w kern.entropy.consolidate=1` trigger, the caller must
1114 * have just transitioned from partial entropy to full
1115 * entropy -- E->needed should be zero now.
1116 */
1117 static void
1118 entropy_notify(void)
1119 {
1120 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1121 static struct timeval lasttime; /* serialized by E->lock */
1122 unsigned epoch;
1123
1124 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1125
1126 /*
1127 * If this is the first time, print a message to the console
1128 * that we're ready so operators can compare it to the timing
1129 * of other events.
1130 */
1131 if (__predict_false(!rnd_initial_entropy) && E->needed == 0) {
1132 printf("entropy: ready\n");
1133 rnd_initial_entropy = 1;
1134 }
1135
1136 /* Set the epoch; roll over from UINTMAX-1 to 1. */
1137 if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
1138 ratecheck(&lasttime, &interval)) {
1139 epoch = E->epoch + 1;
1140 if (epoch == 0 || epoch == (unsigned)-1)
1141 epoch = 1;
1142 atomic_store_relaxed(&E->epoch, epoch);
1143 }
1144
1145 /* Notify waiters. */
1146 if (E->stage >= ENTROPY_WARM) {
1147 cv_broadcast(&E->cv);
1148 selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
1149 }
1150
1151 /* Count another notification. */
1152 entropy_notify_evcnt.ev_count++;
1153 }
1154
1155 /*
1156 * entropy_consolidate()
1157 *
1158 * Trigger entropy consolidation and wait for it to complete.
1159 *
1160 * This should be used sparingly, not periodically -- requiring
1161 * conscious intervention by the operator or a clear policy
1162 * decision. Otherwise, the kernel will automatically consolidate
1163 * when enough entropy has been gathered into per-CPU pools to
1164 * transition to full entropy.
1165 */
1166 void
1167 entropy_consolidate(void)
1168 {
1169 uint64_t ticket;
1170 int error;
1171
1172 KASSERT(E->stage == ENTROPY_HOT);
1173
1174 mutex_enter(&E->lock);
1175 ticket = entropy_consolidate_evcnt.ev_count;
1176 E->consolidate = true;
1177 cv_broadcast(&E->cv);
1178 while (ticket == entropy_consolidate_evcnt.ev_count) {
1179 error = cv_wait_sig(&E->cv, &E->lock);
1180 if (error)
1181 break;
1182 }
1183 mutex_exit(&E->lock);
1184 }
1185
1186 /*
1187 * sysctl -w kern.entropy.consolidate=1
1188 *
1189 * Trigger entropy consolidation and wait for it to complete.
1190 * Writable only by superuser. This, writing to /dev/random, and
1191 * ioctl(RNDADDDATA) are the only ways for the system to
1192 * consolidate entropy if the operator knows something the kernel
1193 * doesn't about how unpredictable the pending entropy pools are.
1194 */
1195 static int
1196 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
1197 {
1198 struct sysctlnode node = *rnode;
1199 int arg;
1200 int error;
1201
1202 KASSERT(E->stage == ENTROPY_HOT);
1203
1204 node.sysctl_data = &arg;
1205 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1206 if (error || newp == NULL)
1207 return error;
1208 if (arg)
1209 entropy_consolidate();
1210
1211 return error;
1212 }
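
/*
 * The same trigger can be pulled programmatically from userland (a
 * sketch, equivalent to `sysctl -w kern.entropy.consolidate=1` and
 * likewise requiring superuser; kern.entropy.gather below works the
 * same way):
 *
 *	int one = 1;
 *
 *	if (sysctlbyname("kern.entropy.consolidate", NULL, NULL,
 *		&one, sizeof one) == -1)
 *		err(1, "kern.entropy.consolidate");
 */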
1213
1214 /*
1215 * sysctl -w kern.entropy.gather=1
1216 *
1217 * Trigger gathering entropy from all on-demand sources, and wait
1218 * for synchronous sources (but not asynchronous sources) to
1219 * complete. Writable only by superuser.
1220 */
1221 static int
1222 sysctl_entropy_gather(SYSCTLFN_ARGS)
1223 {
1224 struct sysctlnode node = *rnode;
1225 int arg;
1226 int error;
1227
1228 KASSERT(E->stage == ENTROPY_HOT);
1229
1230 node.sysctl_data = &arg;
1231 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1232 if (error || newp == NULL)
1233 return error;
1234 if (arg) {
1235 mutex_enter(&E->lock);
1236 entropy_request(ENTROPY_CAPACITY);
1237 mutex_exit(&E->lock);
1238 }
1239
1240 return 0;
1241 }
1242
1243 /*
1244 * entropy_extract(buf, len, flags)
1245 *
1246 * Extract len bytes from the global entropy pool into buf.
1247 *
1248 * Flags may have:
1249 *
1250 * ENTROPY_WAIT Wait for entropy if not available yet.
1251 * ENTROPY_SIG Allow interruption by a signal during wait.
1252 * ENTROPY_HARDFAIL Either fill the buffer with full entropy,
1253 * or fail without filling it at all.
1254 *
1255 * Return zero on success, or error on failure:
1256 *
1257 * EWOULDBLOCK No entropy and ENTROPY_WAIT not set.
1258 * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted.
1259 *
1260 * If ENTROPY_WAIT is set, allowed only in thread context. If
1261 * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's
1262 * awfully high... Do we really need it in hard interrupts? This
1263 * arises from use of cprng_strong(9).)
1264 */
1265 int
1266 entropy_extract(void *buf, size_t len, int flags)
1267 {
1268 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1269 static struct timeval lasttime; /* serialized by E->lock */
1270 int error;
1271
1272 if (ISSET(flags, ENTROPY_WAIT)) {
1273 ASSERT_SLEEPABLE();
1274 KASSERTMSG(E->stage >= ENTROPY_WARM,
1275 "can't wait for entropy until warm");
1276 }
1277
1278 /* Acquire the global lock to get at the global pool. */
1279 if (E->stage >= ENTROPY_WARM)
1280 mutex_enter(&E->lock);
1281
1282 /* Count up request for entropy in interrupt context. */
1283 if (cpu_intr_p())
1284 entropy_extract_intr_evcnt.ev_count++;
1285
1286 /* Wait until there is enough entropy in the system. */
1287 error = 0;
1288 while (E->needed) {
1289 /* Ask for more, synchronously if possible. */
1290 entropy_request(len);
1291
1292 /* If we got enough, we're done. */
1293 if (E->needed == 0) {
1294 KASSERT(error == 0);
1295 break;
1296 }
1297
1298 /* If not waiting, stop here. */
1299 if (!ISSET(flags, ENTROPY_WAIT)) {
1300 error = EWOULDBLOCK;
1301 break;
1302 }
1303
1304 /* Wait for some entropy to come in and try again. */
1305 KASSERT(E->stage >= ENTROPY_WARM);
1306 printf("entropy: pid %d (%s) blocking due to lack of entropy\n",
1307 curproc->p_pid, curproc->p_comm);
1308
1309 if (ISSET(flags, ENTROPY_SIG)) {
1310 error = cv_wait_sig(&E->cv, &E->lock);
1311 if (error)
1312 break;
1313 } else {
1314 cv_wait(&E->cv, &E->lock);
1315 }
1316 }
1317
1318 /*
1319 * Count failure -- but fill the buffer nevertheless, unless
1320 * the caller specified ENTROPY_HARDFAIL.
1321 */
1322 if (error) {
1323 if (ISSET(flags, ENTROPY_HARDFAIL))
1324 goto out;
1325 entropy_extract_fail_evcnt.ev_count++;
1326 }
1327
1328 /*
1329 * Report a warning if we have never yet reached full entropy.
1330 * This is the only case where we consider entropy to be
1331 * `depleted' without kern.entropy.depletion enabled -- when we
1332 * only have partial entropy, an adversary may be able to
1333 * narrow the state of the pool down to a small number of
1334 * possibilities; the output then enables them to confirm a
1335 * guess, reducing its entropy from the adversary's perspective
1336 * to zero.
1337 */
1338 if (__predict_false(E->epoch == (unsigned)-1)) {
1339 if (ratecheck(&lasttime, &interval))
1340 printf("entropy: WARNING:"
1341 " extracting entropy too early\n");
1342 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY);
1343 }
1344
1345 /* Extract data from the pool, and `deplete' if we're doing that. */
1346 entpool_extract(&E->pool, buf, len);
1347 if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
1348 error == 0) {
1349 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
1350
1351 atomic_store_relaxed(&E->needed,
1352 E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost));
1353 entropy_deplete_evcnt.ev_count++;
1354 }
1355
1356 out: /* Release the global lock and return the error. */
1357 if (E->stage >= ENTROPY_WARM)
1358 mutex_exit(&E->lock);
1359 return error;
1360 }
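
/*
 * Typical use in thread context -- a sketch; the PRNG being reseeded
 * is hypothetical, and note that unless ENTROPY_HARDFAIL is set the
 * buffer is filled with best-effort output even when an error is
 * returned:
 *
 *	uint8_t seed[32];
 *	int error;
 *
 *	error = entropy_extract(seed, sizeof seed,
 *	    ENTROPY_WAIT|ENTROPY_SIG);
 *	if (error)	(EINTR or ERESTART, because of ENTROPY_SIG)
 *		decide whether to proceed with best-effort output
 *	reseed the PRNG from seed
 *	explicit_memset(seed, 0, sizeof seed);
 */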
1361
1362 /*
1363 * entropy_poll(events)
1364 *
1365 * Return the subset of events ready, and if it is not all of
1366 * events, record curlwp as waiting for entropy.
1367 */
1368 int
1369 entropy_poll(int events)
1370 {
1371 int revents = 0;
1372
1373 KASSERT(E->stage >= ENTROPY_WARM);
1374
1375 /* Always ready for writing. */
1376 revents |= events & (POLLOUT|POLLWRNORM);
1377
1378 /* Narrow it down to reads. */
1379 events &= POLLIN|POLLRDNORM;
1380 if (events == 0)
1381 return revents;
1382
1383 /*
1384 * If we have reached full entropy and we're not depleting
1385 * entropy, we are forever ready.
1386 */
1387 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
1388 __predict_true(!atomic_load_relaxed(&entropy_depletion)))
1389 return revents | events;
1390
1391 /*
1392 * Otherwise, check whether we need entropy under the lock. If
1393 * we don't, we're ready; if we do, add ourselves to the queue.
1394 */
1395 mutex_enter(&E->lock);
1396 if (E->needed == 0)
1397 revents |= events;
1398 else
1399 selrecord(curlwp, &E->selq);
1400 mutex_exit(&E->lock);
1401
1402 return revents;
1403 }
1404
1405 /*
1406 * filt_entropy_read_detach(kn)
1407 *
1408 * struct filterops::f_detach callback for entropy read events:
1409 * remove kn from the list of waiters.
1410 */
1411 static void
1412 filt_entropy_read_detach(struct knote *kn)
1413 {
1414
1415 KASSERT(E->stage >= ENTROPY_WARM);
1416
1417 mutex_enter(&E->lock);
1418 selremove_knote(&E->selq, kn);
1419 mutex_exit(&E->lock);
1420 }
1421
1422 /*
1423 * filt_entropy_read_event(kn, hint)
1424 *
1425 * struct filterops::f_event callback for entropy read events:
1426 * poll for entropy. Caller must hold the global entropy lock if
1427 * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
1428 */
1429 static int
1430 filt_entropy_read_event(struct knote *kn, long hint)
1431 {
1432 int ret;
1433
1434 KASSERT(E->stage >= ENTROPY_WARM);
1435
1436 /* Acquire the lock, if caller is outside entropy subsystem. */
1437 if (hint == NOTE_SUBMIT)
1438 KASSERT(mutex_owned(&E->lock));
1439 else
1440 mutex_enter(&E->lock);
1441
1442 /*
1443 * If we still need entropy, can't read anything; if not, can
1444 * read arbitrarily much.
1445 */
1446 if (E->needed != 0) {
1447 ret = 0;
1448 } else {
1449 if (atomic_load_relaxed(&entropy_depletion))
1450 kn->kn_data = ENTROPY_CAPACITY*NBBY;
1451 else
1452 kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
1453 ret = 1;
1454 }
1455
1456 /* Release the lock, if caller is outside entropy subsystem. */
1457 if (hint == NOTE_SUBMIT)
1458 KASSERT(mutex_owned(&E->lock));
1459 else
1460 mutex_exit(&E->lock);
1461
1462 return ret;
1463 }
1464
1465 static const struct filterops entropy_read_filtops = {
1466 .f_isfd = 1, /* XXX Makes sense only for /dev/u?random. */
1467 .f_attach = NULL,
1468 .f_detach = filt_entropy_read_detach,
1469 .f_event = filt_entropy_read_event,
1470 };
1471
1472 /*
1473 * entropy_kqfilter(kn)
1474 *
1475 * Register kn to receive entropy event notifications. May be
1476 * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
1477 */
1478 int
1479 entropy_kqfilter(struct knote *kn)
1480 {
1481
1482 KASSERT(E->stage >= ENTROPY_WARM);
1483
1484 switch (kn->kn_filter) {
1485 case EVFILT_READ:
1486 /* Enter into the global select queue. */
1487 mutex_enter(&E->lock);
1488 kn->kn_fop = &entropy_read_filtops;
1489 selrecord_knote(&E->selq, kn);
1490 mutex_exit(&E->lock);
1491 return 0;
1492 case EVFILT_WRITE:
1493 /* Can always dump entropy into the system. */
1494 kn->kn_fop = &seltrue_filtops;
1495 return 0;
1496 default:
1497 return EINVAL;
1498 }
1499 }
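
/*
 * Userland sketch: wait for the system to reach full entropy by
 * watching /dev/random for readability -- this assumes the read
 * filter above is what /dev/random's kqfilter registers, per the
 * f_isfd comment; error handling omitted:
 *
 *	struct kevent kev;
 *	int fd, kq;
 *
 *	fd = open("/dev/random", O_RDONLY);
 *	kq = kqueue();
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD|EV_ONESHOT, 0, 0, NULL);
 *	if (kevent(kq, &kev, 1, &kev, 1, NULL) == 1)
 *		the pool had full entropy when the event fired
 */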
1500
1501 /*
1502 * rndsource_setcb(rs, get, getarg)
1503 *
1504 * Set the request callback for the entropy source rs, if it can
1505 * provide entropy on demand. Must precede rnd_attach_source.
1506 */
1507 void
1508 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
1509 void *getarg)
1510 {
1511
1512 rs->get = get;
1513 rs->getarg = getarg;
1514 }
1515
1516 /*
1517 * rnd_attach_source(rs, name, type, flags)
1518 *
1519 * Attach the entropy source rs. Must be done after
1520 * rndsource_setcb, if any, and before any calls to rnd_add_data.
1521 */
1522 void
1523 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
1524 uint32_t flags)
1525 {
1526 uint32_t extra[4];
1527 unsigned i = 0;
1528
1529 /* Grab cycle counter to mix extra into the pool. */
1530 extra[i++] = entropy_timer();
1531
1532 /*
1533 * Apply some standard flags:
1534 *
1535 * - We do not bother with network devices by default, for
1536 * hysterical raisins (perhaps: because it is often the case
1537 * that an adversary can influence network packet timings).
1538 */
1539 switch (type) {
1540 case RND_TYPE_NET:
1541 flags |= RND_FLAG_NO_COLLECT;
1542 break;
1543 }
1544
1545 /* Sanity-check the callback if RND_FLAG_HASCB is set. */
1546 KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
1547
1548 /* Initialize the random source. */
1549 memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
1550 strlcpy(rs->name, name, sizeof(rs->name));
1551 memset(&rs->time_delta, 0, sizeof(rs->time_delta));
1552 memset(&rs->value_delta, 0, sizeof(rs->value_delta));
1553 rs->total = 0;
1554 rs->type = type;
1555 rs->flags = flags;
1556 if (E->stage >= ENTROPY_WARM)
1557 rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
1558 extra[i++] = entropy_timer();
1559
1560 /* Wire it into the global list of random sources. */
1561 if (E->stage >= ENTROPY_WARM)
1562 mutex_enter(&E->lock);
1563 LIST_INSERT_HEAD(&E->sources, rs, list);
1564 if (E->stage >= ENTROPY_WARM)
1565 mutex_exit(&E->lock);
1566 extra[i++] = entropy_timer();
1567
1568 /* Request that it provide entropy ASAP, if we can. */
1569 if (ISSET(flags, RND_FLAG_HASCB))
1570 (*rs->get)(ENTROPY_CAPACITY, rs->getarg);
1571 extra[i++] = entropy_timer();
1572
1573 /* Mix the extra into the pool. */
1574 KASSERT(i == __arraycount(extra));
1575 entropy_enter(extra, sizeof extra, 0);
1576 explicit_memset(extra, 0, sizeof extra);
1577 }
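
/*
 * For a source that can produce entropy on demand, such as a hardware
 * RNG, set the callback first and pass RND_FLAG_HASCB -- a sketch,
 * with hypothetical foo_get/foo_softc/foo_read_bytes names; the
 * entropy estimate passed to rnd_add_data_sync is the driver author's
 * responsibility, per the comment at the top of this file:
 *
 *	static void
 *	foo_get(size_t nbytes, void *cookie)
 *	{
 *		struct foo_softc *sc = cookie;
 *		uint8_t buf[32];
 *
 *		while (nbytes) {
 *			foo_read_bytes(sc, buf, sizeof buf);
 *			rnd_add_data_sync(&sc->sc_rndsource, buf,
 *			    sizeof buf, NBBY*sizeof buf);
 *			nbytes -= MIN(nbytes, sizeof buf);
 *		}
 *	}
 *
 *	rndsource_setcb(&sc->sc_rndsource, foo_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, "foo0", RND_TYPE_RNG,
 *	    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
 */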
1578
1579 /*
1580 * rnd_detach_source(rs)
1581 *
1582 * Detach the entropy source rs. May sleep waiting for users to
1583 * drain. Further use is not allowed.
1584 */
1585 void
1586 rnd_detach_source(struct krndsource *rs)
1587 {
1588
1589 /*
1590 * If we're cold (shouldn't happen, but hey), just remove it
1591 * from the list -- there's nothing allocated.
1592 */
1593 if (E->stage == ENTROPY_COLD) {
1594 LIST_REMOVE(rs, list);
1595 return;
1596 }
1597
1598 /* We may have to wait for entropy_request. */
1599 ASSERT_SLEEPABLE();
1600
1601 /* Wait until the source list is not in use, and remove it. */
1602 mutex_enter(&E->lock);
1603 while (E->sourcelock)
1604 cv_wait(&E->sourcelock_cv, &E->lock);
1605 LIST_REMOVE(rs, list);
1606 mutex_exit(&E->lock);
1607
1608 /* Free the per-CPU data. */
1609 percpu_free(rs->state, sizeof(struct rndsource_cpu));
1610 }
1611
1612 /*
1613 * rnd_lock_sources()
1614 *
1615 * Prevent changes to the list of rndsources while we iterate it.
1616 * Interruptible. Caller must hold the global entropy lock. If
1617 * successful, no rndsource will go away until rnd_unlock_sources
1618 * even while the caller releases the global entropy lock.
1619 */
1620 static int
1621 rnd_lock_sources(void)
1622 {
1623 int error;
1624
1625 KASSERT(mutex_owned(&E->lock));
1626
1627 while (E->sourcelock) {
1628 error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
1629 if (error)
1630 return error;
1631 }
1632
1633 E->sourcelock = curlwp;
1634 return 0;
1635 }
1636
1637 /*
1638 * rnd_trylock_sources()
1639 *
1640 * Try to lock the list of sources, but if it's already locked,
1641 * fail. Caller must hold the global entropy lock. If
1642 * successful, no rndsource will go away until rnd_unlock_sources
1643 * even while the caller releases the global entropy lock.
1644 */
1645 static bool
1646 rnd_trylock_sources(void)
1647 {
1648
1649 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1650
1651 if (E->sourcelock)
1652 return false;
1653 E->sourcelock = curlwp;
1654 return true;
1655 }
1656
1657 /*
1658 * rnd_unlock_sources()
1659 *
1660 * Unlock the list of sources after rnd_lock_sources or
1661 * rnd_trylock_sources. Caller must hold the global entropy lock.
1662 */
1663 static void
1664 rnd_unlock_sources(void)
1665 {
1666
1667 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1668
1669 KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
1670 curlwp, E->sourcelock);
1671 E->sourcelock = NULL;
1672 if (E->stage >= ENTROPY_WARM)
1673 cv_signal(&E->sourcelock_cv);
1674 }
1675
1676 /*
1677 * rnd_sources_locked()
1678 *
1679 * True if we hold the list of rndsources locked, for diagnostic
1680 * assertions.
1681 */
1682 static bool __diagused
1683 rnd_sources_locked(void)
1684 {
1685
1686 return E->sourcelock == curlwp;
1687 }
1688
1689 /*
1690 * entropy_request(nbytes)
1691 *
1692 * Request nbytes bytes of entropy from all sources in the system.
1693 * OK if we overdo it. Caller must hold the global entropy lock;
1694 * will release and re-acquire it.
1695 */
1696 static void
1697 entropy_request(size_t nbytes)
1698 {
1699 struct krndsource *rs;
1700
1701 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1702
1703 /*
1704 * If there is a request in progress, let it proceed.
1705 * Otherwise, note that a request is in progress to avoid
1706 * reentry and to block rnd_detach_source until we're done.
1707 */
1708 if (!rnd_trylock_sources())
1709 return;
1710 entropy_request_evcnt.ev_count++;
1711
1712 /* Clamp to the maximum reasonable request. */
1713 nbytes = MIN(nbytes, ENTROPY_CAPACITY);
1714
1715 /* Walk the list of sources. */
1716 LIST_FOREACH(rs, &E->sources, list) {
1717 /* Skip sources without callbacks. */
1718 if (!ISSET(rs->flags, RND_FLAG_HASCB))
1719 continue;
1720
1721 /*
1722 * Skip sources that are disabled altogether -- we
1723 * would just ignore their samples anyway.
1724 */
1725 if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
1726 continue;
1727
1728 /* Drop the lock while we call the callback. */
1729 if (E->stage >= ENTROPY_WARM)
1730 mutex_exit(&E->lock);
1731 (*rs->get)(nbytes, rs->getarg);
1732 if (E->stage >= ENTROPY_WARM)
1733 mutex_enter(&E->lock);
1734 }
1735
1736 /* Notify rnd_detach_source that the request is done. */
1737 rnd_unlock_sources();
1738 }
1739
1740 /*
1741 * rnd_add_uint32(rs, value)
1742 *
1743 * Enter 32 bits of data from an entropy source into the pool.
1744 *
1745 * If rs is NULL, may not be called from interrupt context.
1746 *
1747 * If rs is non-NULL, may be called from any context. May drop
1748 * data if called from interrupt context.
1749 */
1750 void
1751 rnd_add_uint32(struct krndsource *rs, uint32_t value)
1752 {
1753
1754 rnd_add_data(rs, &value, sizeof value, 0);
1755 }
1756
1757 void
1758 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
1759 {
1760
1761 rnd_add_data(rs, &value, sizeof value, 0);
1762 }
1763
1764 void
1765 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
1766 {
1767
1768 rnd_add_data(rs, &value, sizeof value, 0);
1769 }
1770
1771 /*
1772 * rnd_add_data(rs, buf, len, entropybits)
1773 *
1774 * Enter data from an entropy source into the pool, with a
1775 * driver's estimate of how much entropy the physical source of
1776 * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's
1777 * estimate and treat it as zero.
1778 *
1779 * If rs is NULL, may not be called from interrupt context.
1780 *
1781 * If rs is non-NULL, may be called from any context. May drop
1782 * data if called from interrupt context.
1783 */
1784 void
1785 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
1786 uint32_t entropybits)
1787 {
1788 uint32_t extra;
1789 uint32_t flags;
1790
1791 KASSERTMSG(howmany(entropybits, NBBY) <= len,
1792 "%s: impossible entropy rate:"
1793 " %"PRIu32" bits in %"PRIu32"-byte string",
1794 rs ? rs->name : "(anonymous)", entropybits, len);
1795
1796 /* If there's no rndsource, just enter the data and time now. */
1797 if (rs == NULL) {
1798 entropy_enter(buf, len, entropybits);
1799 extra = entropy_timer();
1800 entropy_enter(&extra, sizeof extra, 0);
1801 explicit_memset(&extra, 0, sizeof extra);
1802 return;
1803 }
1804
1805 /* Load a snapshot of the flags. Ioctl may change them under us. */
1806 flags = atomic_load_relaxed(&rs->flags);
1807
1808 /*
1809 * Skip if:
1810 * - we're not collecting entropy, or
1811 * - the operator doesn't want to collect entropy from this, or
1812 * - neither data nor timings are being collected from this.
1813 */
1814 if (!atomic_load_relaxed(&entropy_collection) ||
1815 ISSET(flags, RND_FLAG_NO_COLLECT) ||
1816 !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
1817 return;
1818
1819 /* If asked, ignore the estimate. */
1820 if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
1821 entropybits = 0;
1822
1823 /* If we are collecting data, enter them. */
1824 if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
1825 rnd_add_data_1(rs, buf, len, entropybits,
1826 RND_FLAG_COLLECT_VALUE);
1827
1828 /* If we are collecting timings, enter one. */
1829 if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
1830 extra = entropy_timer();
1831 rnd_add_data_1(rs, &extra, sizeof extra, 0,
1832 RND_FLAG_COLLECT_TIME);
1833 }
1834 }
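
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * entering a buffer of samples with a conservative entropy estimate.
 * Per the contract above, the estimate must be a defensible lower
 * bound for the physical source, and howmany(entropybits, NBBY) may
 * not exceed len.  foo_read_samples and sc->sc_rndsource are
 * assumptions for illustration.
 *
 *	uint8_t samples[16];
 *
 *	foo_read_samples(sc, samples, sizeof samples);
 *	rnd_add_data(&sc->sc_rndsource, samples, sizeof samples,
 *	    8);	// claim at least 8 bits of entropy in these 16 bytes
 */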
1835
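/*
 * add_sat(a, b)
 *
 *	Return a + b, saturating at UINT_MAX instead of wrapping on
 *	unsigned overflow, so the per-source sample and bit counters
 *	stick at the maximum rather than silently starting over.
 */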
1836 static unsigned
1837 add_sat(unsigned a, unsigned b)
1838 {
1839 unsigned c = a + b;
1840
1841 return (c < a ? UINT_MAX : c);
1842 }
1843
1844 /*
1845 * rnd_add_data_1(rs, buf, len, entropybits, flag)
1846 *
1847 * Internal subroutine to call either entropy_enter_intr, if we're
1848 * in interrupt context, or entropy_enter if not, and to count the
1849 * entropy in an rndsource.
1850 */
1851 static void
1852 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
1853 uint32_t entropybits, uint32_t flag)
1854 {
1855 bool fullyused;
1856
1857 /*
1858 * If we're in interrupt context, use entropy_enter_intr and
1859 * take note of whether it consumed the full sample; if not,
1860 * use entropy_enter, which always consumes the full sample.
1861 */
1862 if (curlwp && cpu_intr_p()) {
1863 fullyused = entropy_enter_intr(buf, len, entropybits);
1864 } else {
1865 entropy_enter(buf, len, entropybits);
1866 fullyused = true;
1867 }
1868
1869 /*
1870 * If we used the full sample, note how many bits were
1871 * contributed from this source.
1872 */
1873 if (fullyused) {
1874 if (E->stage < ENTROPY_HOT) {
1875 if (E->stage >= ENTROPY_WARM)
1876 mutex_enter(&E->lock);
1877 rs->total = add_sat(rs->total, entropybits);
1878 switch (flag) {
1879 case RND_FLAG_COLLECT_TIME:
1880 rs->time_delta.insamples =
1881 add_sat(rs->time_delta.insamples, 1);
1882 break;
1883 case RND_FLAG_COLLECT_VALUE:
1884 rs->value_delta.insamples =
1885 add_sat(rs->value_delta.insamples, 1);
1886 break;
1887 }
1888 if (E->stage >= ENTROPY_WARM)
1889 mutex_exit(&E->lock);
1890 } else {
1891 struct rndsource_cpu *rc = percpu_getref(rs->state);
1892
1893 atomic_store_relaxed(&rc->rc_entropybits,
1894 add_sat(rc->rc_entropybits, entropybits));
1895 switch (flag) {
1896 case RND_FLAG_COLLECT_TIME:
1897 atomic_store_relaxed(&rc->rc_timesamples,
1898 add_sat(rc->rc_timesamples, 1));
1899 break;
1900 case RND_FLAG_COLLECT_VALUE:
1901 atomic_store_relaxed(&rc->rc_datasamples,
1902 add_sat(rc->rc_datasamples, 1));
1903 break;
1904 }
1905 percpu_putref(rs->state);
1906 }
1907 }
1908 }
1909
1910 /*
1911 * rnd_add_data_sync(rs, buf, len, entropybits)
1912 *
1913 * Same as rnd_add_data. Originally used in rndsource callbacks,
1914 * to break an unnecessary cycle; no longer really needed.
1915 */
1916 void
1917 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
1918 uint32_t entropybits)
1919 {
1920
1921 rnd_add_data(rs, buf, len, entropybits);
1922 }
1923
1924 /*
1925 * rndsource_entropybits(rs)
1926 *
1927 * Return approximately the number of bits of entropy that have
1928 * been contributed via rs so far. Approximate if other CPUs may
1929 * be calling rnd_add_data concurrently.
1930 */
1931 static unsigned
1932 rndsource_entropybits(struct krndsource *rs)
1933 {
1934 unsigned nbits = rs->total;
1935
1936 KASSERT(E->stage >= ENTROPY_WARM);
1937 KASSERT(rnd_sources_locked());
1938 percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
1939 return nbits;
1940 }
1941
1942 static void
1943 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
1944 {
1945 struct rndsource_cpu *rc = ptr;
1946 unsigned *nbitsp = cookie;
1947 unsigned cpu_nbits;
1948
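	/* Add this CPU's contribution, saturating the total at UINT_MAX. */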
1949 cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
1950 *nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
1951 }
1952
1953 /*
1954 * rndsource_to_user(rs, urs)
1955 *
1956 * Copy a description of rs out to urs for userland.
1957 */
1958 static void
1959 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
1960 {
1961
1962 KASSERT(E->stage >= ENTROPY_WARM);
1963 KASSERT(rnd_sources_locked());
1964
1965 /* Avoid kernel memory disclosure. */
1966 memset(urs, 0, sizeof(*urs));
1967
1968 CTASSERT(sizeof(urs->name) == sizeof(rs->name));
1969 strlcpy(urs->name, rs->name, sizeof(urs->name));
1970 urs->total = rndsource_entropybits(rs);
1971 urs->type = rs->type;
1972 urs->flags = atomic_load_relaxed(&rs->flags);
1973 }
1974
1975 /*
1976 * rndsource_to_user_est(rs, urse)
1977 *
1978 * Copy a description of rs and estimation statistics out to urse
1979 * for userland.
1980 */
1981 static void
1982 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
1983 {
1984
1985 KASSERT(E->stage >= ENTROPY_WARM);
1986 KASSERT(rnd_sources_locked());
1987
1988 /* Avoid kernel memory disclosure. */
1989 memset(urse, 0, sizeof(*urse));
1990
1991 /* Copy out the rndsource description. */
1992 rndsource_to_user(rs, &urse->rt);
1993
1994 /* Gather the statistics. */
1995 urse->dt_samples = rs->time_delta.insamples;
1996 urse->dt_total = 0;
1997 urse->dv_samples = rs->value_delta.insamples;
1998 urse->dv_total = urse->rt.total;
1999 percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
2000 }
2001
2002 static void
2003 rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2004 {
2005 struct rndsource_cpu *rc = ptr;
2006 rndsource_est_t *urse = cookie;
2007
2008 urse->dt_samples = add_sat(urse->dt_samples,
2009 atomic_load_relaxed(&rc->rc_timesamples));
2010 urse->dv_samples = add_sat(urse->dv_samples,
2011 atomic_load_relaxed(&rc->rc_datasamples));
2012 }
2013
2014 /*
2015 * entropy_reset_xc(arg1, arg2)
2016 *
2017 * Reset the current CPU's pending entropy to zero.
2018 */
2019 static void
2020 entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
2021 {
2022 uint32_t extra = entropy_timer();
2023 struct entropy_cpu *ec;
2024 int s;
2025
2026 /*
2027 * Acquire the per-CPU state, blocking soft interrupts and
2028 * causing hard interrupts to drop samples on the floor.
2029 */
2030 ec = percpu_getref(entropy_percpu);
2031 s = splsoftserial();
2032 KASSERT(!ec->ec_locked);
2033 ec->ec_locked = true;
2034 __insn_barrier();
2035
2036 /* Zero the pending count and enter a cycle count for fun. */
2037 ec->ec_pending = 0;
2038 entpool_enter(ec->ec_pool, &extra, sizeof extra);
2039
2040 /* Release the per-CPU state. */
2041 KASSERT(ec->ec_locked);
2042 __insn_barrier();
2043 ec->ec_locked = false;
2044 splx(s);
2045 percpu_putref(entropy_percpu);
2046 }
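
/*
 * For reference, a minimal sketch of how this handler is driven
 * through the xc(9) cross-call API.  The RNDCTL path below
 * broadcasts it without waiting; adding xc_wait would block until
 * every CPU has run the handler.
 *
 *	uint64_t ticket;
 *
 *	ticket = xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
 *	xc_wait(ticket);
 */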
2047
2048 /*
2049 * entropy_ioctl(cmd, data)
2050 *
2051 * Handle various /dev/random ioctl queries.
2052 */
2053 int
2054 entropy_ioctl(unsigned long cmd, void *data)
2055 {
2056 struct krndsource *rs;
2057 	bool privileged = false;
2058 int error;
2059
2060 KASSERT(E->stage >= ENTROPY_WARM);
2061
2062 /* Verify user's authorization to perform the ioctl. */
2063 switch (cmd) {
2064 case RNDGETENTCNT:
2065 case RNDGETPOOLSTAT:
2066 case RNDGETSRCNUM:
2067 case RNDGETSRCNAME:
2068 case RNDGETESTNUM:
2069 case RNDGETESTNAME:
2070 error = kauth_authorize_device(curlwp->l_cred,
2071 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
2072 break;
2073 case RNDCTL:
2074 error = kauth_authorize_device(curlwp->l_cred,
2075 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
2076 break;
2077 case RNDADDDATA:
2078 error = kauth_authorize_device(curlwp->l_cred,
2079 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
2080 /* Ascertain whether the user's inputs should be counted. */
2081 if (kauth_authorize_device(curlwp->l_cred,
2082 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
2083 NULL, NULL, NULL, NULL) == 0)
2084 privileged = true;
2085 break;
2086 default: {
2087 /*
2088 * XXX Hack to avoid changing module ABI so this can be
2089 * pulled up. Later, we can just remove the argument.
2090 */
2091 static const struct fileops fops = {
2092 .fo_ioctl = rnd_system_ioctl,
2093 };
2094 struct file f = {
2095 .f_ops = &fops,
2096 };
2097 MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
2098 enosys(), error);
2099 #if defined(_LP64)
2100 if (error == ENOSYS)
2101 MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
2102 enosys(), error);
2103 #endif
2104 if (error == ENOSYS)
2105 error = ENOTTY;
2106 break;
2107 }
2108 }
2109
2110 /* If anything went wrong with authorization, stop here. */
2111 if (error)
2112 return error;
2113
2114 /* Dispatch on the command. */
2115 switch (cmd) {
2116 case RNDGETENTCNT: { /* Get current entropy count in bits. */
2117 uint32_t *countp = data;
2118
2119 mutex_enter(&E->lock);
2120 *countp = ENTROPY_CAPACITY*NBBY - E->needed;
2121 mutex_exit(&E->lock);
2122
2123 break;
2124 }
2125 case RNDGETPOOLSTAT: { /* Get entropy pool statistics. */
2126 rndpoolstat_t *pstat = data;
2127
2128 mutex_enter(&E->lock);
2129
2130 /* parameters */
2131 pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
2132 pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
2133 pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
2134
2135 /* state */
2136 pstat->added = 0; /* XXX total entropy_enter count */
2137 pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
2138 pstat->removed = 0; /* XXX total entropy_extract count */
2139 pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
2140 pstat->generated = 0; /* XXX bits of data...fabricated? */
2141
2142 mutex_exit(&E->lock);
2143 break;
2144 }
2145 case RNDGETSRCNUM: { /* Get entropy sources by number. */
2146 rndstat_t *stat = data;
2147 uint32_t start = 0, i = 0;
2148
2149 /* Skip if none requested; fail if too many requested. */
2150 if (stat->count == 0)
2151 break;
2152 if (stat->count > RND_MAXSTATCOUNT)
2153 return EINVAL;
2154
2155 /*
2156 * Under the lock, find the first one, copy out as many
2157 * as requested, and report how many we copied out.
2158 */
2159 mutex_enter(&E->lock);
2160 error = rnd_lock_sources();
2161 if (error) {
2162 mutex_exit(&E->lock);
2163 return error;
2164 }
2165 LIST_FOREACH(rs, &E->sources, list) {
2166 if (start++ == stat->start)
2167 break;
2168 }
2169 while (i < stat->count && rs != NULL) {
2170 mutex_exit(&E->lock);
2171 rndsource_to_user(rs, &stat->source[i++]);
2172 mutex_enter(&E->lock);
2173 rs = LIST_NEXT(rs, list);
2174 }
2175 KASSERT(i <= stat->count);
2176 stat->count = i;
2177 rnd_unlock_sources();
2178 mutex_exit(&E->lock);
2179 break;
2180 }
2181 case RNDGETESTNUM: { /* Get sources and estimates by number. */
2182 rndstat_est_t *estat = data;
2183 uint32_t start = 0, i = 0;
2184
2185 /* Skip if none requested; fail if too many requested. */
2186 if (estat->count == 0)
2187 break;
2188 if (estat->count > RND_MAXSTATCOUNT)
2189 return EINVAL;
2190
2191 /*
2192 * Under the lock, find the first one, copy out as many
2193 * as requested, and report how many we copied out.
2194 */
2195 mutex_enter(&E->lock);
2196 error = rnd_lock_sources();
2197 if (error) {
2198 mutex_exit(&E->lock);
2199 return error;
2200 }
2201 LIST_FOREACH(rs, &E->sources, list) {
2202 if (start++ == estat->start)
2203 break;
2204 }
2205 while (i < estat->count && rs != NULL) {
2206 mutex_exit(&E->lock);
2207 rndsource_to_user_est(rs, &estat->source[i++]);
2208 mutex_enter(&E->lock);
2209 rs = LIST_NEXT(rs, list);
2210 }
2211 KASSERT(i <= estat->count);
2212 estat->count = i;
2213 rnd_unlock_sources();
2214 mutex_exit(&E->lock);
2215 break;
2216 }
2217 case RNDGETSRCNAME: { /* Get entropy sources by name. */
2218 rndstat_name_t *nstat = data;
2219 const size_t n = sizeof(rs->name);
2220
2221 CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
2222
2223 /*
2224 * Under the lock, search by name. If found, copy it
2225 * out; if not found, fail with ENOENT.
2226 */
2227 mutex_enter(&E->lock);
2228 error = rnd_lock_sources();
2229 if (error) {
2230 mutex_exit(&E->lock);
2231 return error;
2232 }
2233 LIST_FOREACH(rs, &E->sources, list) {
2234 if (strncmp(rs->name, nstat->name, n) == 0)
2235 break;
2236 }
2237 if (rs != NULL) {
2238 mutex_exit(&E->lock);
2239 rndsource_to_user(rs, &nstat->source);
2240 mutex_enter(&E->lock);
2241 } else {
2242 error = ENOENT;
2243 }
2244 rnd_unlock_sources();
2245 mutex_exit(&E->lock);
2246 break;
2247 }
2248 case RNDGETESTNAME: { /* Get sources and estimates by name. */
2249 rndstat_est_name_t *enstat = data;
2250 const size_t n = sizeof(rs->name);
2251
2252 CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
2253
2254 /*
2255 * Under the lock, search by name. If found, copy it
2256 * out; if not found, fail with ENOENT.
2257 */
2258 mutex_enter(&E->lock);
2259 error = rnd_lock_sources();
2260 if (error) {
2261 mutex_exit(&E->lock);
2262 return error;
2263 }
2264 LIST_FOREACH(rs, &E->sources, list) {
2265 if (strncmp(rs->name, enstat->name, n) == 0)
2266 break;
2267 }
2268 if (rs != NULL) {
2269 mutex_exit(&E->lock);
2270 rndsource_to_user_est(rs, &enstat->source);
2271 mutex_enter(&E->lock);
2272 } else {
2273 error = ENOENT;
2274 }
2275 rnd_unlock_sources();
2276 mutex_exit(&E->lock);
2277 break;
2278 }
2279 case RNDCTL: { /* Modify entropy source flags. */
2280 rndctl_t *rndctl = data;
2281 const size_t n = sizeof(rs->name);
2282 uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2283 uint32_t flags;
2284 bool reset = false, request = false;
2285
2286 CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
2287
2288 		/* Whitelist the flags that the user can change. */
2289 rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2290
2291 /*
2292 * For each matching rndsource, either by type if
2293 * specified or by name if not, set the masked flags.
2294 */
2295 mutex_enter(&E->lock);
2296 LIST_FOREACH(rs, &E->sources, list) {
2297 if (rndctl->type != 0xff) {
2298 if (rs->type != rndctl->type)
2299 continue;
2300 } else {
2301 if (strncmp(rs->name, rndctl->name, n) != 0)
2302 continue;
2303 }
2304 flags = rs->flags & ~rndctl->mask;
2305 flags |= rndctl->flags & rndctl->mask;
2306 if ((rs->flags & resetflags) == 0 &&
2307 (flags & resetflags) != 0)
2308 reset = true;
2309 if ((rs->flags ^ flags) & resetflags)
2310 request = true;
2311 atomic_store_relaxed(&rs->flags, flags);
2312 }
2313 mutex_exit(&E->lock);
2314
2315 /*
2316 * If we disabled estimation or collection, nix all the
2317 * pending entropy and set needed to the maximum.
2318 */
2319 if (reset) {
2320 xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
2321 mutex_enter(&E->lock);
2322 E->pending = 0;
2323 atomic_store_relaxed(&E->needed,
2324 ENTROPY_CAPACITY*NBBY);
2325 mutex_exit(&E->lock);
2326 }
2327
2328 /*
2329 * If we changed any of the estimation or collection
2330 * flags, request new samples from everyone -- either
2331 * to make up for what we just lost, or to get new
2332 * samples from what we just added.
2333 */
2334 if (request) {
2335 mutex_enter(&E->lock);
2336 entropy_request(ENTROPY_CAPACITY);
2337 mutex_exit(&E->lock);
2338 }
2339 break;
2340 }
2341 case RNDADDDATA: { /* Enter seed into entropy pool. */
2342 rnddata_t *rdata = data;
2343 unsigned entropybits = 0;
2344
2345 if (!atomic_load_relaxed(&entropy_collection))
2346 break; /* thanks but no thanks */
2347 if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
2348 return EINVAL;
2349
2350 /*
2351 		 * This ioctl serves as the userland alternative to a
2352 * bootloader-provided seed -- typically furnished by
2353 * /etc/rc.d/random_seed. We accept the user's entropy
2354 * claim only if
2355 *
2356 * (a) the user is privileged, and
2357 		 * (b) we have not already entered a bootloader seed,
2358 *
2359 * under the assumption that the user may use this to
2360 * load a seed from disk that we have already loaded
2361 * from the bootloader, so we don't double-count it.
2362 */
2363 if (privileged && rdata->entropy && rdata->len) {
2364 mutex_enter(&E->lock);
2365 if (!E->seeded) {
2366 entropybits = MIN(rdata->entropy,
2367 MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
2368 E->seeded = true;
2369 }
2370 mutex_exit(&E->lock);
2371 }
2372
2373 /* Enter the data and consolidate entropy. */
2374 rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
2375 entropybits);
2376 entropy_consolidate();
2377 break;
2378 }
2379 default:
2380 error = ENOTTY;
2381 }
2382
2383 /* Return any error that may have come up. */
2384 return error;
2385 }
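
/*
 * Illustrative sketch (not part of this file): a minimal userland
 * consumer of one of these ioctls, assuming the rnd(4) device node
 * /dev/urandom accepts them:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/rndio.h>
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t bits;
 *		int fd;
 *
 *		fd = open("/dev/urandom", O_RDONLY);
 *		if (fd == -1)
 *			err(1, "open");
 *		if (ioctl(fd, RNDGETENTCNT, &bits) == -1)
 *			err(1, "RNDGETENTCNT");
 *		printf("%" PRIu32 " bits of entropy\n", bits);
 *		return close(fd) == -1 ? 1 : 0;
 *	}
 */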
2386
2387 /* Legacy entry points */
2388
2389 void
2390 rnd_seed(void *seed, size_t len)
2391 {
2392
2393 if (len != sizeof(rndsave_t)) {
2394 printf("entropy: invalid seed length: %zu,"
2395 " expected sizeof(rndsave_t) = %zu\n",
2396 len, sizeof(rndsave_t));
2397 return;
2398 }
2399 entropy_seed(seed);
2400 }
2401
2402 void
2403 rnd_init(void)
2404 {
2405
2406 entropy_init();
2407 }
2408
2409 void
2410 rnd_init_softint(void)
2411 {
2412
2413 entropy_init_late();
2414 }
2415
2416 int
2417 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
2418 {
2419
2420 return entropy_ioctl(cmd, data);
2421 }
2422