      1 /*	$NetBSD: kern_entropy.c,v 1.16 2020/05/08 00:54:44 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Entropy subsystem
     34  *
     35  *	* Each CPU maintains a per-CPU entropy pool so that gathering
     36  *	  entropy requires no interprocessor synchronization, except
     37  *	  early at boot when we may be scrambling to gather entropy as
     38  *	  soon as possible.
     39  *
     40  *	  - entropy_enter gathers entropy and never drops it on the
     41  *	    floor, at the cost of sometimes having to do cryptography.
     42  *
     43  *	  - entropy_enter_intr gathers entropy or drops it on the
     44  *	    floor, with low latency.  Work to stir the pool or kick the
     45  *	    housekeeping thread is scheduled in soft interrupts.
     46  *
     47  *	* entropy_enter immediately enters into the global pool if it
     48  *	  can transition to full entropy in one swell foop.  Otherwise,
     49  *	  it defers to a housekeeping thread that consolidates entropy,
     50  *	  but only when the CPUs collectively have full entropy, in
     51  *	  order to mitigate iterative-guessing attacks.
     52  *
     53  *	* The entropy housekeeping thread continues to consolidate
     54  *	  entropy even after we think we have full entropy, in case we
     55  *	  are wrong, but is limited to one discretionary consolidation
     56  *	  per minute, and only when new entropy is actually coming in,
     57  *	  to limit performance impact.
     58  *
     59  *	* The entropy epoch is the number that changes when we
     60  *	  transition from partial entropy to full entropy, so that
     61  *	  users can easily determine when to reseed.  This also
     62  *	  facilitates an operator explicitly causing everything to
     63  *	  reseed by sysctl -w kern.entropy.consolidate=1.
     64  *
     65  *	* No entropy estimation based on the sample values, which is a
     66  *	  contradiction in terms and a potential source of side
     67  *	  channels.  It is the responsibility of the driver author to
     68  *	  study how predictable the physical source of input can ever
     69  *	  be, and to furnish a lower bound on the amount of entropy it
     70  *	  has.
      71  *	  has.  (See the illustrative sketch below.)
     72  *	* Entropy depletion is available for testing (or if you're into
     73  *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
     74  *	  the logic to support it is small, to minimize chance of bugs.
     75  */
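
/*
 * Illustrative driver-side sketch (not part of this file; the foo_*
 * names are hypothetical).  It shows the rndsource(9) API implemented
 * later in this file: the driver attaches a source once and then
 * feeds in samples, furnishing a lower bound on the entropy of the
 * process that generated each sample -- here a conservative zero
 * bits, so the sample contributes data but no credited entropy.
 *
 *	static struct krndsource foo_rndsource;
 *
 *	void
 *	foo_attach(device_t self)
 *	{
 *		rnd_attach_source(&foo_rndsource, "foo", RND_TYPE_UNKNOWN,
 *		    RND_FLAG_COLLECT_VALUE);
 *	}
 *
 *	void
 *	foo_sample(uint32_t sample)
 *	{
 *		rnd_add_data(&foo_rndsource, &sample, sizeof sample, 0);
 *	}
 */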
     76 
     77 #include <sys/cdefs.h>
     78 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.16 2020/05/08 00:54:44 riastradh Exp $");
     79 
     80 #include <sys/param.h>
     81 #include <sys/types.h>
     82 #include <sys/atomic.h>
     83 #include <sys/compat_stub.h>
     84 #include <sys/condvar.h>
     85 #include <sys/cpu.h>
     86 #include <sys/entropy.h>
     87 #include <sys/errno.h>
     88 #include <sys/evcnt.h>
     89 #include <sys/event.h>
     90 #include <sys/file.h>
     91 #include <sys/intr.h>
     92 #include <sys/kauth.h>
     93 #include <sys/kernel.h>
     94 #include <sys/kmem.h>
     95 #include <sys/kthread.h>
     96 #include <sys/module_hook.h>
     97 #include <sys/mutex.h>
     98 #include <sys/percpu.h>
     99 #include <sys/poll.h>
    100 #include <sys/queue.h>
    101 #include <sys/rnd.h>		/* legacy kernel API */
    102 #include <sys/rndio.h>		/* userland ioctl interface */
    103 #include <sys/rndsource.h>	/* kernel rndsource driver API */
    104 #include <sys/select.h>
    105 #include <sys/selinfo.h>
    106 #include <sys/sha1.h>		/* for boot seed checksum */
    107 #include <sys/stdint.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/systm.h>
    110 #include <sys/time.h>
    111 #include <sys/xcall.h>
    112 
    113 #include <lib/libkern/entpool.h>
    114 
    115 #include <machine/limits.h>
    116 
    117 #ifdef __HAVE_CPU_COUNTER
    118 #include <machine/cpu_counter.h>
    119 #endif
    120 
    121 /*
    122  * struct entropy_cpu
    123  *
    124  *	Per-CPU entropy state.  The pool is allocated separately
    125  *	because percpu(9) sometimes moves per-CPU objects around
    126  *	without zeroing them, which would lead to unwanted copies of
     127  *	sensitive secrets.  The evcnt is allocated separately because
    128  *	evcnt(9) assumes it stays put in memory.
    129  */
    130 struct entropy_cpu {
    131 	struct evcnt		*ec_softint_evcnt;
    132 	struct entpool		*ec_pool;
    133 	unsigned		ec_pending;
    134 	bool			ec_locked;
    135 };
    136 
    137 /*
    138  * struct rndsource_cpu
    139  *
    140  *	Per-CPU rndsource state.
    141  */
    142 struct rndsource_cpu {
    143 	unsigned		rc_nbits; /* bits of entropy added */
    144 };
    145 
    146 /*
    147  * entropy_global (a.k.a. E for short in this file)
    148  *
    149  *	Global entropy state.  Writes protected by the global lock.
    150  *	Some fields, marked (A), can be read outside the lock, and are
    151  *	maintained with atomic_load/store_relaxed.
    152  */
    153 struct {
    154 	kmutex_t	lock;		/* covers all global state */
    155 	struct entpool	pool;		/* global pool for extraction */
    156 	unsigned	needed;		/* (A) needed globally */
    157 	unsigned	pending;	/* (A) pending in per-CPU pools */
    158 	unsigned	timestamp;	/* (A) time of last consolidation */
    159 	unsigned	epoch;		/* (A) changes when needed -> 0 */
    160 	kcondvar_t	cv;		/* notifies state changes */
    161 	struct selinfo	selq;		/* notifies needed -> 0 */
    162 	struct lwp	*sourcelock;	/* lock on list of sources */
    163 	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
    164 	enum entropy_stage {
    165 		ENTROPY_COLD = 0, /* single-threaded */
    166 		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
    167 		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
    168 	}		stage;
    169 	bool		consolidate;	/* kick thread to consolidate */
    170 	bool		seed_rndsource;	/* true if seed source is attached */
    171 	bool		seeded;		/* true if seed file already loaded */
    172 } entropy_global __cacheline_aligned = {
    173 	/* Fields that must be initialized when the kernel is loaded.  */
    174 	.needed = ENTROPY_CAPACITY*NBBY,
    175 	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
    176 	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
    177 	.stage = ENTROPY_COLD,
    178 };
    179 
    180 #define	E	(&entropy_global)	/* declutter */
    181 
    182 /* Read-mostly globals */
    183 static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
    184 static void		*entropy_sih __read_mostly; /* softint handler */
    185 static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */
    186 
    187 int rnd_initial_entropy __read_mostly; /* XXX legacy */
    188 
    189 static struct krndsource seed_rndsource __read_mostly;
    190 
    191 /*
    192  * Event counters
    193  *
    194  *	Must be careful with adding these because they can serve as
    195  *	side channels.
    196  */
    197 static struct evcnt entropy_discretionary_evcnt =
    198     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
    199 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
    200 static struct evcnt entropy_immediate_evcnt =
    201     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
    202 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
    203 static struct evcnt entropy_partial_evcnt =
    204     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
    205 EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
    206 static struct evcnt entropy_consolidate_evcnt =
    207     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
    208 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
    209 static struct evcnt entropy_extract_intr_evcnt =
    210     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr");
    211 EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt);
    212 static struct evcnt entropy_extract_fail_evcnt =
    213     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
    214 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
    215 static struct evcnt entropy_request_evcnt =
    216     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
    217 EVCNT_ATTACH_STATIC(entropy_request_evcnt);
    218 static struct evcnt entropy_deplete_evcnt =
    219     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
    220 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
    221 static struct evcnt entropy_notify_evcnt =
    222     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
    223 EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
    224 
    225 /* Sysctl knobs */
    226 bool	entropy_collection = 1;
    227 bool	entropy_depletion = 0; /* Silly!  */
    228 
    229 static const struct sysctlnode	*entropy_sysctlroot;
    230 static struct sysctllog		*entropy_sysctllog;
    231 
    232 /* Forward declarations */
    233 static void	entropy_init_cpu(void *, void *, struct cpu_info *);
    234 static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
    235 static void	entropy_account_cpu(struct entropy_cpu *);
    236 static void	entropy_enter(const void *, size_t, unsigned);
    237 static bool	entropy_enter_intr(const void *, size_t, unsigned);
    238 static void	entropy_softintr(void *);
    239 static void	entropy_thread(void *);
    240 static uint32_t	entropy_pending(void);
    241 static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
    242 static void	entropy_do_consolidate(void);
    243 static void	entropy_consolidate_xc(void *, void *);
    244 static void	entropy_notify(void);
    245 static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
    246 static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
    247 static void	filt_entropy_read_detach(struct knote *);
    248 static int	filt_entropy_read_event(struct knote *, long);
    249 static void	entropy_request(size_t);
    250 static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
    251 		    uint32_t);
    252 static unsigned	rndsource_entropybits(struct krndsource *);
    253 static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
    254 static void	rndsource_to_user(struct krndsource *, rndsource_t *);
    255 static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
    256 
    257 /*
    258  * entropy_timer()
    259  *
    260  *	Cycle counter, time counter, or anything that changes a wee bit
    261  *	unpredictably.
    262  */
    263 static inline uint32_t
    264 entropy_timer(void)
    265 {
    266 	struct bintime bt;
    267 	uint32_t v;
    268 
    269 	/* If we have a CPU cycle counter, use the low 32 bits.  */
    270 #ifdef __HAVE_CPU_COUNTER
    271 	if (__predict_true(cpu_hascounter()))
    272 		return cpu_counter32();
    273 #endif	/* __HAVE_CPU_COUNTER */
    274 
    275 	/* If we're cold, tough.  Can't binuptime while cold.  */
    276 	if (__predict_false(cold))
    277 		return 0;
    278 
    279 	/* Fold the 128 bits of binuptime into 32 bits.  */
    280 	binuptime(&bt);
    281 	v = bt.frac;
    282 	v ^= bt.frac >> 32;
    283 	v ^= bt.sec;
    284 	v ^= bt.sec >> 32;
    285 	return v;
    286 }
    287 
    288 static void
    289 attach_seed_rndsource(void)
    290 {
    291 
    292 	/*
    293 	 * First called no later than entropy_init, while we are still
    294 	 * single-threaded, so no need for RUN_ONCE.
    295 	 */
    296 	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
    297 		return;
    298 	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
    299 	    RND_FLAG_COLLECT_VALUE);
    300 	E->seed_rndsource = true;
    301 }
    302 
    303 /*
    304  * entropy_init()
    305  *
    306  *	Initialize the entropy subsystem.  Panic on failure.
    307  *
    308  *	Requires percpu(9) and sysctl(9) to be initialized.
    309  */
    310 static void
    311 entropy_init(void)
    312 {
    313 	uint32_t extra[2];
    314 	struct krndsource *rs;
    315 	unsigned i = 0;
    316 
    317 	KASSERT(E->stage == ENTROPY_COLD);
    318 
    319 	/* Grab some cycle counts early at boot.  */
    320 	extra[i++] = entropy_timer();
    321 
    322 	/* Run the entropy pool cryptography self-test.  */
    323 	if (entpool_selftest() == -1)
    324 		panic("entropy pool crypto self-test failed");
    325 
    326 	/* Create the sysctl directory.  */
    327 	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
    328 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
    329 	    SYSCTL_DESCR("Entropy (random number sources) options"),
    330 	    NULL, 0, NULL, 0,
    331 	    CTL_KERN, CTL_CREATE, CTL_EOL);
    332 
    333 	/* Create the sysctl knobs.  */
    334 	/* XXX These shouldn't be writable at securelevel>0.  */
    335 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    336 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
    337 	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
    338 	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
    339 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    340 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
    341 	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
    342 	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
    343 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    344 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
    345 	    SYSCTL_DESCR("Trigger entropy consolidation now"),
    346 	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
    347 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    348 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
    349 	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
    350 	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
    351 	/* XXX These should maybe not be readable at securelevel>0.  */
    352 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    353 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
    354 	    "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
    355 	    NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
    356 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    357 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
    358 	    "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
    359 	    NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
    360 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    361 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
    362 	    "epoch", SYSCTL_DESCR("Entropy epoch"),
    363 	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
    364 
    365 	/* Initialize the global state for multithreaded operation.  */
    366 	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM);
    367 	cv_init(&E->cv, "entropy");
    368 	selinit(&E->selq);
    369 
    370 	/* Make sure the seed source is attached.  */
    371 	attach_seed_rndsource();
    372 
    373 	/* Note if the bootloader didn't provide a seed.  */
    374 	if (!E->seeded)
    375 		printf("entropy: no seed from bootloader\n");
    376 
    377 	/* Allocate the per-CPU records for all early entropy sources.  */
    378 	LIST_FOREACH(rs, &E->sources, list)
    379 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
    380 
    381 	/* Enter the boot cycle count to get started.  */
    382 	extra[i++] = entropy_timer();
    383 	KASSERT(i == __arraycount(extra));
    384 	entropy_enter(extra, sizeof extra, 0);
    385 	explicit_memset(extra, 0, sizeof extra);
    386 
    387 	/* We are now ready for multi-threaded operation.  */
    388 	E->stage = ENTROPY_WARM;
    389 }
    390 
    391 /*
    392  * entropy_init_late()
    393  *
    394  *	Late initialization.  Panic on failure.
    395  *
    396  *	Requires CPUs to have been detected and LWPs to have started.
    397  */
    398 static void
    399 entropy_init_late(void)
    400 {
    401 	int error;
    402 
    403 	KASSERT(E->stage == ENTROPY_WARM);
    404 
    405 	/* Allocate and initialize the per-CPU state.  */
    406 	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
    407 	    entropy_init_cpu, entropy_fini_cpu, NULL);
    408 
    409 	/*
    410 	 * Establish the softint at the highest softint priority level.
    411 	 * Must happen after CPU detection.
    412 	 */
    413 	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
    414 	    &entropy_softintr, NULL);
    415 	if (entropy_sih == NULL)
    416 		panic("unable to establish entropy softint");
    417 
    418 	/*
    419 	 * Create the entropy housekeeping thread.  Must happen after
    420 	 * lwpinit.
    421 	 */
    422 	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
    423 	    entropy_thread, NULL, &entropy_lwp, "entbutler");
    424 	if (error)
    425 		panic("unable to create entropy housekeeping thread: %d",
    426 		    error);
    427 
    428 	/*
    429 	 * Wait until the per-CPU initialization has hit all CPUs
    430 	 * before proceeding to mark the entropy system hot.
    431 	 */
    432 	xc_barrier(XC_HIGHPRI);
    433 	E->stage = ENTROPY_HOT;
    434 }
    435 
    436 /*
    437  * entropy_init_cpu(ptr, cookie, ci)
    438  *
    439  *	percpu(9) constructor for per-CPU entropy pool.
    440  */
    441 static void
    442 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    443 {
    444 	struct entropy_cpu *ec = ptr;
    445 
    446 	ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt),
    447 	    KM_SLEEP);
    448 	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
    449 	ec->ec_pending = 0;
    450 	ec->ec_locked = false;
    451 
    452 	evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
    453 	    ci->ci_cpuname, "entropy softint");
    454 }
    455 
    456 /*
    457  * entropy_fini_cpu(ptr, cookie, ci)
    458  *
    459  *	percpu(9) destructor for per-CPU entropy pool.
    460  */
    461 static void
    462 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    463 {
    464 	struct entropy_cpu *ec = ptr;
    465 
    466 	/*
    467 	 * Zero any lingering data.  Disclosure of the per-CPU pool
    468 	 * shouldn't retroactively affect the security of any keys
    469 	 * generated, because entpool(9) erases whatever we have just
    470 	 * drawn out of any pool, but better safe than sorry.
    471 	 */
    472 	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
    473 
    474 	evcnt_detach(ec->ec_softint_evcnt);
    475 
    476 	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
    477 	kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt));
    478 }
    479 
    480 /*
    481  * entropy_seed(seed)
    482  *
    483  *	Seed the entropy pool with seed.  Meant to be called as early
    484  *	as possible by the bootloader; may be called before or after
    485  *	entropy_init.  Must be called before system reaches userland.
    486  *	Must be called in thread or soft interrupt context, not in hard
    487  *	interrupt context.  Must be called at most once.
    488  *
    489  *	Overwrites the seed in place.  Caller may then free the memory.
    490  */
    491 static void
    492 entropy_seed(rndsave_t *seed)
    493 {
    494 	SHA1_CTX ctx;
    495 	uint8_t digest[SHA1_DIGEST_LENGTH];
    496 	bool seeded;
    497 
    498 	/*
    499 	 * Verify the checksum.  If the checksum fails, take the data
    500 	 * but ignore the entropy estimate -- the file may have been
    501 	 * incompletely written with garbage, which is harmless to add
    502 	 * but may not be as unpredictable as alleged.
    503 	 */
    504 	SHA1Init(&ctx);
    505 	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
    506 	SHA1Update(&ctx, seed->data, sizeof(seed->data));
    507 	SHA1Final(digest, &ctx);
    508 	CTASSERT(sizeof(seed->digest) == sizeof(digest));
    509 	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
    510 		printf("entropy: invalid seed checksum\n");
    511 		seed->entropy = 0;
    512 	}
    513 	explicit_memset(&ctx, 0, sizeof ctx);
    514 	explicit_memset(digest, 0, sizeof digest);
    515 
    516 	/*
    517 	 * If the entropy is insensibly large, try byte-swapping.
    518 	 * Otherwise assume the file is corrupted and act as though it
    519 	 * has zero entropy.
     520 	 * If the entropy estimate is implausibly large, try byte-swapping.
    521 	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
    522 		seed->entropy = bswap32(seed->entropy);
    523 		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
    524 			seed->entropy = 0;
    525 	}
    526 
    527 	/* Make sure the seed source is attached.  */
    528 	attach_seed_rndsource();
    529 
    530 	/* Test and set E->seeded.  */
    531 	if (E->stage >= ENTROPY_WARM)
    532 		mutex_enter(&E->lock);
    533 	seeded = E->seeded;
    534 	E->seeded = (seed->entropy > 0);
    535 	if (E->stage >= ENTROPY_WARM)
    536 		mutex_exit(&E->lock);
    537 
    538 	/*
    539 	 * If we've been seeded, may be re-entering the same seed
     540 	 * If we've been seeded, we may be re-entering the same seed
    541 	 * entering it twice, but it contributes no additional entropy.
    542 	 */
    543 	if (seeded) {
    544 		printf("entropy: double-seeded by bootloader\n");
    545 		seed->entropy = 0;
    546 	} else {
    547 		printf("entropy: entering seed from bootloader"
    548 		    " with %u bits of entropy\n", (unsigned)seed->entropy);
    549 	}
    550 
    551 	/* Enter it into the pool and promptly zero it.  */
    552 	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
    553 	    seed->entropy);
    554 	explicit_memset(seed, 0, sizeof(*seed));
    555 }
    556 
    557 /*
    558  * entropy_bootrequest()
    559  *
    560  *	Request entropy from all sources at boot, once config is
    561  *	complete and interrupts are running.
    562  */
    563 void
    564 entropy_bootrequest(void)
    565 {
    566 
    567 	KASSERT(E->stage >= ENTROPY_WARM);
    568 
    569 	/*
    570 	 * Request enough to satisfy the maximum entropy shortage.
    571 	 * This is harmless overkill if the bootloader provided a seed.
    572 	 */
    573 	mutex_enter(&E->lock);
    574 	entropy_request(ENTROPY_CAPACITY);
    575 	mutex_exit(&E->lock);
    576 }
    577 
    578 /*
    579  * entropy_epoch()
    580  *
    581  *	Returns the current entropy epoch.  If this changes, you should
     582  *	reseed.  A value of -1 means system entropy has not yet reached full
    583  *	entropy or been explicitly consolidated; never reverts back to
    584  *	-1.  Never zero, so you can always use zero as an uninitialized
    585  *	sentinel value meaning `reseed ASAP'.
    586  *
    587  *	Usage model:
    588  *
    589  *		struct foo {
    590  *			struct crypto_prng prng;
    591  *			unsigned epoch;
    592  *		} *foo;
    593  *
    594  *		unsigned epoch = entropy_epoch();
    595  *		if (__predict_false(epoch != foo->epoch)) {
    596  *			uint8_t seed[32];
    597  *			if (entropy_extract(seed, sizeof seed, 0) != 0)
    598  *				warn("no entropy");
    599  *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
    600  *			foo->epoch = epoch;
    601  *		}
    602  */
    603 unsigned
    604 entropy_epoch(void)
    605 {
    606 
    607 	/*
    608 	 * Unsigned int, so no need for seqlock for an atomic read, but
    609 	 * make sure we read it afresh each time.
    610 	 */
    611 	return atomic_load_relaxed(&E->epoch);
    612 }
    613 
    614 /*
    615  * entropy_account_cpu(ec)
    616  *
    617  *	Consider whether to consolidate entropy into the global pool
    618  *	after we just added some into the current CPU's pending pool.
    619  *
    620  *	- If this CPU can provide enough entropy now, do so.
    621  *
    622  *	- If this and whatever else is available on other CPUs can
    623  *	  provide enough entropy, kick the consolidation thread.
    624  *
    625  *	- Otherwise, do as little as possible, except maybe consolidate
    626  *	  entropy at most once a minute.
    627  *
    628  *	Caller must be bound to a CPU and therefore have exclusive
    629  *	access to ec.  Will acquire and release the global lock.
    630  */
    631 static void
    632 entropy_account_cpu(struct entropy_cpu *ec)
    633 {
    634 	unsigned diff;
    635 
    636 	KASSERT(E->stage == ENTROPY_HOT);
    637 
    638 	/*
    639 	 * If there's no entropy needed, and entropy has been
    640 	 * consolidated in the last minute, do nothing.
    641 	 */
    642 	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
    643 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
    644 	    __predict_true((time_uptime - E->timestamp) <= 60))
    645 		return;
    646 
    647 	/* If there's nothing pending, stop here.  */
    648 	if (ec->ec_pending == 0)
    649 		return;
    650 
    651 	/* Consider consolidation, under the lock.  */
    652 	mutex_enter(&E->lock);
    653 	if (E->needed != 0 && E->needed <= ec->ec_pending) {
    654 		/*
    655 		 * If we have not yet attained full entropy but we can
    656 		 * now, do so.  This way we disseminate entropy
    657 		 * promptly when it becomes available early at boot;
    658 		 * otherwise we leave it to the entropy consolidation
    659 		 * thread, which is rate-limited to mitigate side
    660 		 * channels and abuse.
    661 		 */
    662 		uint8_t buf[ENTPOOL_CAPACITY];
    663 
    664 		/* Transfer from the local pool to the global pool.  */
    665 		entpool_extract(ec->ec_pool, buf, sizeof buf);
    666 		entpool_enter(&E->pool, buf, sizeof buf);
    667 		atomic_store_relaxed(&ec->ec_pending, 0);
    668 		atomic_store_relaxed(&E->needed, 0);
    669 
    670 		/* Notify waiters that we now have full entropy.  */
    671 		entropy_notify();
    672 		entropy_immediate_evcnt.ev_count++;
    673 	} else if (ec->ec_pending) {
    674 		/* Record how much we can add to the global pool.  */
    675 		diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending);
    676 		E->pending += diff;
    677 		atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff);
    678 
    679 		/*
    680 		 * This should have made a difference unless we were
    681 		 * already saturated.
    682 		 */
    683 		KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY);
    684 		KASSERT(E->pending);
    685 
    686 		if (E->needed <= E->pending) {
    687 			/*
    688 			 * Enough entropy between all the per-CPU
    689 			 * pools.  Wake up the housekeeping thread.
    690 			 *
    691 			 * If we don't need any entropy, this doesn't
    692 			 * mean much, but it is the only time we ever
    693 			 * gather additional entropy in case the
    694 			 * accounting has been overly optimistic.  This
    695 			 * happens at most once a minute, so there's
    696 			 * negligible performance cost.
    697 			 */
    698 			E->consolidate = true;
    699 			cv_broadcast(&E->cv);
    700 			if (E->needed == 0)
    701 				entropy_discretionary_evcnt.ev_count++;
    702 		} else {
    703 			/* Can't get full entropy.  Keep gathering.  */
    704 			entropy_partial_evcnt.ev_count++;
    705 		}
    706 	}
    707 	mutex_exit(&E->lock);
    708 }
    709 
    710 /*
    711  * entropy_enter_early(buf, len, nbits)
    712  *
    713  *	Do entropy bookkeeping globally, before we have established
    714  *	per-CPU pools.  Enter directly into the global pool in the hope
    715  *	that we enter enough before the first entropy_extract to thwart
    716  *	iterative-guessing attacks; entropy_extract will warn if not.
    717  */
    718 static void
    719 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
    720 {
    721 	bool notify = false;
    722 
    723 	if (E->stage >= ENTROPY_WARM)
    724 		mutex_enter(&E->lock);
    725 
    726 	/* Enter it into the pool.  */
    727 	entpool_enter(&E->pool, buf, len);
    728 
    729 	/*
    730 	 * Decide whether to notify reseed -- we will do so if either:
    731 	 * (a) we transition from partial entropy to full entropy, or
    732 	 * (b) we get a batch of full entropy all at once.
    733 	 */
    734 	notify |= (E->needed && E->needed <= nbits);
    735 	notify |= (nbits >= ENTROPY_CAPACITY*NBBY);
    736 
    737 	/* Subtract from the needed count and notify if appropriate.  */
    738 	E->needed -= MIN(E->needed, nbits);
    739 	if (notify) {
    740 		entropy_notify();
    741 		entropy_immediate_evcnt.ev_count++;
    742 	}
    743 
    744 	if (E->stage >= ENTROPY_WARM)
    745 		mutex_exit(&E->lock);
    746 }
    747 
    748 /*
    749  * entropy_enter(buf, len, nbits)
    750  *
    751  *	Enter len bytes of data from buf into the system's entropy
    752  *	pool, stirring as necessary when the internal buffer fills up.
    753  *	nbits is a lower bound on the number of bits of entropy in the
    754  *	process that led to this sample.
    755  */
    756 static void
    757 entropy_enter(const void *buf, size_t len, unsigned nbits)
    758 {
    759 	struct entropy_cpu *ec;
    760 	uint32_t pending;
    761 	int s;
    762 
    763 	KASSERTMSG(!cpu_intr_p(),
    764 	    "use entropy_enter_intr from interrupt context");
    765 	KASSERTMSG(howmany(nbits, NBBY) <= len,
    766 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
    767 
    768 	/* If it's too early after boot, just use entropy_enter_early.  */
    769 	if (__predict_false(E->stage < ENTROPY_HOT)) {
    770 		entropy_enter_early(buf, len, nbits);
    771 		return;
    772 	}
    773 
    774 	/*
    775 	 * Acquire the per-CPU state, blocking soft interrupts and
    776 	 * causing hard interrupts to drop samples on the floor.
    777 	 */
    778 	ec = percpu_getref(entropy_percpu);
    779 	s = splsoftserial();
    780 	KASSERT(!ec->ec_locked);
    781 	ec->ec_locked = true;
    782 	__insn_barrier();
    783 
    784 	/* Enter into the per-CPU pool.  */
    785 	entpool_enter(ec->ec_pool, buf, len);
    786 
    787 	/* Count up what we can add.  */
    788 	pending = ec->ec_pending;
    789 	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
    790 	atomic_store_relaxed(&ec->ec_pending, pending);
    791 
    792 	/* Consolidate globally if appropriate based on what we added.  */
    793 	entropy_account_cpu(ec);
    794 
    795 	/* Release the per-CPU state.  */
    796 	KASSERT(ec->ec_locked);
    797 	__insn_barrier();
    798 	ec->ec_locked = false;
    799 	splx(s);
    800 	percpu_putref(entropy_percpu);
    801 }
    802 
    803 /*
    804  * entropy_enter_intr(buf, len, nbits)
    805  *
    806  *	Enter up to len bytes of data from buf into the system's
    807  *	entropy pool without stirring.  nbits is a lower bound on the
    808  *	number of bits of entropy in the process that led to this
    809  *	sample.  If the sample could be entered completely, assume
    810  *	nbits of entropy pending; otherwise assume none, since we don't
    811  *	know whether some parts of the sample are constant, for
    812  *	instance.  Schedule a softint to stir the entropy pool if
    813  *	needed.  Return true if used fully, false if truncated at all.
    814  *
    815  *	Using this in thread context will work, but you might as well
    816  *	use entropy_enter in that case.
    817  */
    818 static bool
    819 entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
    820 {
    821 	struct entropy_cpu *ec;
    822 	bool fullyused = false;
    823 	uint32_t pending;
    824 
    825 	KASSERTMSG(howmany(nbits, NBBY) <= len,
    826 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
    827 
    828 	/* If it's too early after boot, just use entropy_enter_early.  */
    829 	if (__predict_false(E->stage < ENTROPY_HOT)) {
    830 		entropy_enter_early(buf, len, nbits);
    831 		return true;
    832 	}
    833 
    834 	/*
    835 	 * Acquire the per-CPU state.  If someone is in the middle of
    836 	 * using it, drop the sample.  Otherwise, take the lock so that
    837 	 * higher-priority interrupts will drop their samples.
    838 	 */
    839 	ec = percpu_getref(entropy_percpu);
    840 	if (ec->ec_locked)
    841 		goto out0;
    842 	ec->ec_locked = true;
    843 	__insn_barrier();
    844 
    845 	/*
    846 	 * Enter as much as we can into the per-CPU pool.  If it was
    847 	 * truncated, schedule a softint to stir the pool and stop.
    848 	 */
    849 	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
    850 		softint_schedule(entropy_sih);
    851 		goto out1;
    852 	}
    853 	fullyused = true;
    854 
    855 	/* Count up what we can contribute.  */
    856 	pending = ec->ec_pending;
    857 	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
    858 	atomic_store_relaxed(&ec->ec_pending, pending);
    859 
    860 	/* Schedule a softint if we added anything and it matters.  */
    861 	if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
    862 		atomic_load_relaxed(&entropy_depletion)) &&
    863 	    nbits != 0)
    864 		softint_schedule(entropy_sih);
    865 
    866 out1:	/* Release the per-CPU state.  */
    867 	KASSERT(ec->ec_locked);
    868 	__insn_barrier();
    869 	ec->ec_locked = false;
    870 out0:	percpu_putref(entropy_percpu);
    871 
    872 	return fullyused;
    873 }
    874 
    875 /*
    876  * entropy_softintr(cookie)
    877  *
    878  *	Soft interrupt handler for entering entropy.  Takes care of
    879  *	stirring the local CPU's entropy pool if it filled up during
    880  *	hard interrupts, and promptly crediting entropy from the local
    881  *	CPU's entropy pool to the global entropy pool if needed.
    882  */
    883 static void
    884 entropy_softintr(void *cookie)
    885 {
    886 	struct entropy_cpu *ec;
    887 
    888 	/*
    889 	 * Acquire the per-CPU state.  Other users can lock this only
    890 	 * while soft interrupts are blocked.  Cause hard interrupts to
    891 	 * drop samples on the floor.
    892 	 */
    893 	ec = percpu_getref(entropy_percpu);
    894 	KASSERT(!ec->ec_locked);
    895 	ec->ec_locked = true;
    896 	__insn_barrier();
    897 
    898 	/* Count statistics.  */
    899 	ec->ec_softint_evcnt->ev_count++;
    900 
    901 	/* Stir the pool if necessary.  */
    902 	entpool_stir(ec->ec_pool);
    903 
    904 	/* Consolidate globally if appropriate based on what we added.  */
    905 	entropy_account_cpu(ec);
    906 
    907 	/* Release the per-CPU state.  */
    908 	KASSERT(ec->ec_locked);
    909 	__insn_barrier();
    910 	ec->ec_locked = false;
    911 	percpu_putref(entropy_percpu);
    912 }
    913 
    914 /*
    915  * entropy_thread(cookie)
    916  *
    917  *	Handle any asynchronous entropy housekeeping.
    918  */
    919 static void
    920 entropy_thread(void *cookie)
    921 {
    922 	bool consolidate;
    923 
    924 	for (;;) {
    925 		/*
    926 		 * Wait until there's full entropy somewhere among the
    927 		 * CPUs, as confirmed at most once per minute, or
    928 		 * someone wants to consolidate.
    929 		 */
    930 		if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
    931 			consolidate = true;
    932 		} else {
    933 			mutex_enter(&E->lock);
    934 			if (!E->consolidate)
    935 				cv_timedwait(&E->cv, &E->lock, 60*hz);
    936 			consolidate = E->consolidate;
    937 			E->consolidate = false;
    938 			mutex_exit(&E->lock);
    939 		}
    940 
    941 		if (consolidate) {
    942 			/* Do it.  */
    943 			entropy_do_consolidate();
    944 
    945 			/* Mitigate abuse.  */
    946 			kpause("entropy", false, hz, NULL);
    947 		}
    948 	}
    949 }
    950 
    951 /*
    952  * entropy_pending()
    953  *
    954  *	Count up the amount of entropy pending on other CPUs.
    955  */
    956 static uint32_t
    957 entropy_pending(void)
    958 {
    959 	uint32_t pending = 0;
    960 
    961 	percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
    962 	return pending;
    963 }
    964 
    965 static void
    966 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    967 {
    968 	struct entropy_cpu *ec = ptr;
    969 	uint32_t *pendingp = cookie;
    970 	uint32_t cpu_pending;
    971 
    972 	cpu_pending = atomic_load_relaxed(&ec->ec_pending);
    973 	*pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
    974 }
    975 
    976 /*
    977  * entropy_do_consolidate()
    978  *
    979  *	Issue a cross-call to gather entropy on all CPUs and advance
    980  *	the entropy epoch.
    981  */
    982 static void
    983 entropy_do_consolidate(void)
    984 {
    985 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
    986 	static struct timeval lasttime; /* serialized by E->lock */
    987 	unsigned diff;
    988 	uint64_t ticket;
    989 
    990 	/* Gather entropy on all CPUs.  */
    991 	ticket = xc_broadcast(0, &entropy_consolidate_xc, NULL, NULL);
    992 	xc_wait(ticket);
    993 
    994 	/* Acquire the lock to notify waiters.  */
    995 	mutex_enter(&E->lock);
    996 
    997 	/* Count another consolidation.  */
    998 	entropy_consolidate_evcnt.ev_count++;
    999 
   1000 	/* Note when we last consolidated, i.e. now.  */
   1001 	E->timestamp = time_uptime;
   1002 
   1003 	/* Count the entropy that was gathered.  */
   1004 	diff = MIN(E->needed, E->pending);
   1005 	atomic_store_relaxed(&E->needed, E->needed - diff);
   1006 	E->pending -= diff;
   1007 	if (__predict_false(E->needed > 0)) {
   1008 		if (ratecheck(&lasttime, &interval))
   1009 			printf("entropy: WARNING:"
   1010 			    " consolidating less than full entropy\n");
   1011 	}
   1012 
   1013 	/* Advance the epoch and notify waiters.  */
   1014 	entropy_notify();
   1015 
   1016 	/* Release the lock.  */
   1017 	mutex_exit(&E->lock);
   1018 }
   1019 
   1020 /*
   1021  * entropy_consolidate_xc(arg1, arg2)
   1022  *
   1023  *	Extract output from the local CPU's input pool and enter it
   1024  *	into the global pool.
   1025  */
   1026 static void
   1027 entropy_consolidate_xc(void *arg1 __unused, void *arg2 __unused)
   1028 {
   1029 	struct entropy_cpu *ec;
   1030 	uint8_t buf[ENTPOOL_CAPACITY];
   1031 	uint32_t extra[7];
   1032 	unsigned i = 0;
   1033 	int s;
   1034 
   1035 	/* Grab CPU number and cycle counter to mix extra into the pool.  */
   1036 	extra[i++] = cpu_number();
   1037 	extra[i++] = entropy_timer();
   1038 
   1039 	/*
   1040 	 * Acquire the per-CPU state, blocking soft interrupts and
   1041 	 * discarding entropy in hard interrupts, so that we can
   1042 	 * extract from the per-CPU pool.
   1043 	 */
   1044 	ec = percpu_getref(entropy_percpu);
   1045 	s = splsoftserial();
   1046 	KASSERT(!ec->ec_locked);
   1047 	ec->ec_locked = true;
   1048 	__insn_barrier();
   1049 	extra[i++] = entropy_timer();
   1050 
   1051 	/* Extract the data and count it no longer pending.  */
   1052 	entpool_extract(ec->ec_pool, buf, sizeof buf);
   1053 	atomic_store_relaxed(&ec->ec_pending, 0);
   1054 	extra[i++] = entropy_timer();
   1055 
   1056 	/* Release the per-CPU state.  */
   1057 	KASSERT(ec->ec_locked);
   1058 	__insn_barrier();
   1059 	ec->ec_locked = false;
   1060 	splx(s);
   1061 	percpu_putref(entropy_percpu);
   1062 	extra[i++] = entropy_timer();
   1063 
   1064 	/*
   1065 	 * Copy over statistics, and enter the per-CPU extract and the
   1066 	 * extra timing into the global pool, under the global lock.
   1067 	 */
   1068 	mutex_enter(&E->lock);
   1069 	extra[i++] = entropy_timer();
   1070 	entpool_enter(&E->pool, buf, sizeof buf);
   1071 	explicit_memset(buf, 0, sizeof buf);
   1072 	extra[i++] = entropy_timer();
   1073 	KASSERT(i == __arraycount(extra));
   1074 	entpool_enter(&E->pool, extra, sizeof extra);
   1075 	explicit_memset(extra, 0, sizeof extra);
   1076 	mutex_exit(&E->lock);
   1077 }
   1078 
   1079 /*
   1080  * entropy_notify()
   1081  *
   1082  *	Caller just contributed entropy to the global pool.  Advance
   1083  *	the entropy epoch and notify waiters.
   1084  *
   1085  *	Caller must hold the global entropy lock.  Except for the
   1086  *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
    1087  *	have just transitioned from partial entropy to full
   1088  *	entropy -- E->needed should be zero now.
   1089  */
   1090 static void
   1091 entropy_notify(void)
   1092 {
   1093 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
   1094 	static struct timeval lasttime; /* serialized by E->lock */
   1095 	unsigned epoch;
   1096 
   1097 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1098 
   1099 	/*
   1100 	 * If this is the first time, print a message to the console
   1101 	 * that we're ready so operators can compare it to the timing
   1102 	 * of other events.
   1103 	 */
   1104 	if (__predict_false(!rnd_initial_entropy) && E->needed == 0) {
   1105 		printf("entropy: ready\n");
   1106 		rnd_initial_entropy = 1;
   1107 	}
   1108 
    1109 	/* Set the epoch; roll over from UINT_MAX-1 to 1.  */
   1110 	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
   1111 	    ratecheck(&lasttime, &interval)) {
   1112 		epoch = E->epoch + 1;
   1113 		if (epoch == 0 || epoch == (unsigned)-1)
   1114 			epoch = 1;
   1115 		atomic_store_relaxed(&E->epoch, epoch);
   1116 	}
   1117 
   1118 	/* Notify waiters.  */
   1119 	if (E->stage >= ENTROPY_WARM) {
   1120 		cv_broadcast(&E->cv);
   1121 		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
   1122 	}
   1123 
   1124 	/* Count another notification.  */
   1125 	entropy_notify_evcnt.ev_count++;
   1126 }
   1127 
   1128 /*
   1129  * entropy_consolidate()
   1130  *
   1131  *	Trigger entropy consolidation and wait for it to complete.
   1132  *
    1133  *	This should be used sparingly, not periodically -- only with
   1134  *	conscious intervention by the operator or a clear policy
   1135  *	decision.  Otherwise, the kernel will automatically consolidate
   1136  *	when enough entropy has been gathered into per-CPU pools to
   1137  *	transition to full entropy.
   1138  */
   1139 void
   1140 entropy_consolidate(void)
   1141 {
   1142 	uint64_t ticket;
   1143 	int error;
   1144 
   1145 	KASSERT(E->stage == ENTROPY_HOT);
   1146 
   1147 	mutex_enter(&E->lock);
   1148 	ticket = entropy_consolidate_evcnt.ev_count;
   1149 	E->consolidate = true;
   1150 	cv_broadcast(&E->cv);
   1151 	while (ticket == entropy_consolidate_evcnt.ev_count) {
   1152 		error = cv_wait_sig(&E->cv, &E->lock);
   1153 		if (error)
   1154 			break;
   1155 	}
   1156 	mutex_exit(&E->lock);
   1157 }
   1158 
   1159 /*
   1160  * sysctl -w kern.entropy.consolidate=1
   1161  *
   1162  *	Trigger entropy consolidation and wait for it to complete.
   1163  *	Writable only by superuser.  This, writing to /dev/random, and
   1164  *	ioctl(RNDADDDATA) are the only ways for the system to
   1165  *	consolidate entropy if the operator knows something the kernel
   1166  *	doesn't about how unpredictable the pending entropy pools are.
   1167  */
   1168 static int
   1169 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
   1170 {
   1171 	struct sysctlnode node = *rnode;
   1172 	int arg;
   1173 	int error;
   1174 
   1175 	KASSERT(E->stage == ENTROPY_HOT);
   1176 
   1177 	node.sysctl_data = &arg;
   1178 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   1179 	if (error || newp == NULL)
   1180 		return error;
   1181 	if (arg)
   1182 		entropy_consolidate();
   1183 
   1184 	return error;
   1185 }
   1186 
   1187 /*
   1188  * sysctl -w kern.entropy.gather=1
   1189  *
   1190  *	Trigger gathering entropy from all on-demand sources, and wait
   1191  *	for synchronous sources (but not asynchronous sources) to
   1192  *	complete.  Writable only by superuser.
   1193  */
   1194 static int
   1195 sysctl_entropy_gather(SYSCTLFN_ARGS)
   1196 {
   1197 	struct sysctlnode node = *rnode;
   1198 	int arg;
   1199 	int error;
   1200 
   1201 	KASSERT(E->stage == ENTROPY_HOT);
   1202 
   1203 	node.sysctl_data = &arg;
   1204 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   1205 	if (error || newp == NULL)
   1206 		return error;
   1207 	if (arg) {
   1208 		mutex_enter(&E->lock);
   1209 		entropy_request(ENTROPY_CAPACITY);
   1210 		mutex_exit(&E->lock);
   1211 	}
   1212 
   1213 	return 0;
   1214 }
   1215 
   1216 /*
   1217  * entropy_extract(buf, len, flags)
   1218  *
   1219  *	Extract len bytes from the global entropy pool into buf.
   1220  *
   1221  *	Flags may have:
   1222  *
   1223  *		ENTROPY_WAIT	Wait for entropy if not available yet.
   1224  *		ENTROPY_SIG	Allow interruption by a signal during wait.
   1225  *
   1226  *	Return zero on success, or error on failure:
   1227  *
   1228  *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
   1229  *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
   1230  *
   1231  *	If ENTROPY_WAIT is set, allowed only in thread context.  If
   1232  *	ENTROPY_WAIT is not set, allowed up to IPL_VM.  (XXX That's
   1233  *	awfully high...  Do we really need it in hard interrupts?  This
   1234  *	arises from use of cprng_strong(9).)
   1235  */
   1236 int
   1237 entropy_extract(void *buf, size_t len, int flags)
   1238 {
   1239 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
   1240 	static struct timeval lasttime; /* serialized by E->lock */
   1241 	int error;
   1242 
   1243 	if (ISSET(flags, ENTROPY_WAIT)) {
   1244 		ASSERT_SLEEPABLE();
   1245 		KASSERTMSG(E->stage >= ENTROPY_WARM,
   1246 		    "can't wait for entropy until warm");
   1247 	}
   1248 
   1249 	/* Acquire the global lock to get at the global pool.  */
   1250 	if (E->stage >= ENTROPY_WARM)
   1251 		mutex_enter(&E->lock);
   1252 
   1253 	/* Count up request for entropy in interrupt context.  */
   1254 	if (cpu_intr_p())
   1255 		entropy_extract_intr_evcnt.ev_count++;
   1256 
   1257 	/* Wait until there is enough entropy in the system.  */
   1258 	error = 0;
   1259 	while (E->needed) {
   1260 		/* Ask for more, synchronously if possible.  */
   1261 		entropy_request(len);
   1262 
   1263 		/* If we got enough, we're done.  */
   1264 		if (E->needed == 0) {
   1265 			KASSERT(error == 0);
   1266 			break;
   1267 		}
   1268 
   1269 		/* If not waiting, stop here.  */
   1270 		if (!ISSET(flags, ENTROPY_WAIT)) {
   1271 			error = EWOULDBLOCK;
   1272 			break;
   1273 		}
   1274 
   1275 		/* Wait for some entropy to come in and try again.  */
   1276 		KASSERT(E->stage >= ENTROPY_WARM);
   1277 		if (ISSET(flags, ENTROPY_SIG)) {
   1278 			error = cv_wait_sig(&E->cv, &E->lock);
   1279 			if (error)
   1280 				break;
   1281 		} else {
   1282 			cv_wait(&E->cv, &E->lock);
   1283 		}
   1284 	}
   1285 
   1286 	/* Count failure -- but fill the buffer nevertheless.  */
   1287 	if (error)
   1288 		entropy_extract_fail_evcnt.ev_count++;
   1289 
   1290 	/*
   1291 	 * Report a warning if we have never yet reached full entropy.
   1292 	 * This is the only case where we consider entropy to be
   1293 	 * `depleted' without kern.entropy.depletion enabled -- when we
   1294 	 * only have partial entropy, an adversary may be able to
   1295 	 * narrow the state of the pool down to a small number of
   1296 	 * possibilities; the output then enables them to confirm a
   1297 	 * guess, reducing its entropy from the adversary's perspective
   1298 	 * to zero.
   1299 	 */
   1300 	if (__predict_false(E->epoch == (unsigned)-1)) {
   1301 		if (ratecheck(&lasttime, &interval))
   1302 			printf("entropy: WARNING:"
   1303 			    " extracting entropy too early\n");
   1304 		atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY);
   1305 	}
   1306 
   1307 	/* Extract data from the pool, and `deplete' if we're doing that.  */
   1308 	entpool_extract(&E->pool, buf, len);
   1309 	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
   1310 	    error == 0) {
   1311 		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
   1312 
   1313 		atomic_store_relaxed(&E->needed,
   1314 		    E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost));
   1315 		entropy_deplete_evcnt.ev_count++;
   1316 	}
   1317 
   1318 	/* Release the global lock and return the error.  */
   1319 	if (E->stage >= ENTROPY_WARM)
   1320 		mutex_exit(&E->lock);
   1321 	return error;
   1322 }
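
/*
 * Illustrative usage of entropy_extract (a minimal sketch, not taken
 * from elsewhere in the tree; the key buffer and caller are
 * hypothetical).  On error the buffer is still filled, but with no
 * guarantee of full entropy, so a cautious caller bails out:
 *
 *	uint8_t key[32];
 *	int error;
 *
 *	error = entropy_extract(key, sizeof key, ENTROPY_WAIT|ENTROPY_SIG);
 *	if (error)
 *		return error;
 *	...use key, then explicit_memset(key, 0, sizeof key)...
 */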
   1323 
   1324 /*
   1325  * entropy_poll(events)
   1326  *
   1327  *	Return the subset of events ready, and if it is not all of
   1328  *	events, record curlwp as waiting for entropy.
   1329  */
   1330 int
   1331 entropy_poll(int events)
   1332 {
   1333 	int revents = 0;
   1334 
   1335 	KASSERT(E->stage >= ENTROPY_WARM);
   1336 
   1337 	/* Always ready for writing.  */
   1338 	revents |= events & (POLLOUT|POLLWRNORM);
   1339 
   1340 	/* Narrow it down to reads.  */
   1341 	events &= POLLIN|POLLRDNORM;
   1342 	if (events == 0)
   1343 		return revents;
   1344 
   1345 	/*
   1346 	 * If we have reached full entropy and we're not depleting
   1347 	 * entropy, we are forever ready.
   1348 	 */
   1349 	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
   1350 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
   1351 		return revents | events;
   1352 
   1353 	/*
   1354 	 * Otherwise, check whether we need entropy under the lock.  If
   1355 	 * we don't, we're ready; if we do, add ourselves to the queue.
   1356 	 */
   1357 	mutex_enter(&E->lock);
   1358 	if (E->needed == 0)
   1359 		revents |= events;
   1360 	else
   1361 		selrecord(curlwp, &E->selq);
   1362 	mutex_exit(&E->lock);
   1363 
   1364 	return revents;
   1365 }
   1366 
   1367 /*
   1368  * filt_entropy_read_detach(kn)
   1369  *
   1370  *	struct filterops::f_detach callback for entropy read events:
   1371  *	remove kn from the list of waiters.
   1372  */
   1373 static void
   1374 filt_entropy_read_detach(struct knote *kn)
   1375 {
   1376 
   1377 	KASSERT(E->stage >= ENTROPY_WARM);
   1378 
   1379 	mutex_enter(&E->lock);
   1380 	SLIST_REMOVE(&E->selq.sel_klist, kn, knote, kn_selnext);
   1381 	mutex_exit(&E->lock);
   1382 }
   1383 
   1384 /*
   1385  * filt_entropy_read_event(kn, hint)
   1386  *
   1387  *	struct filterops::f_event callback for entropy read events:
   1388  *	poll for entropy.  Caller must hold the global entropy lock if
   1389  *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
   1390  */
   1391 static int
   1392 filt_entropy_read_event(struct knote *kn, long hint)
   1393 {
   1394 	int ret;
   1395 
   1396 	KASSERT(E->stage >= ENTROPY_WARM);
   1397 
   1398 	/* Acquire the lock, if caller is outside entropy subsystem.  */
   1399 	if (hint == NOTE_SUBMIT)
   1400 		KASSERT(mutex_owned(&E->lock));
   1401 	else
   1402 		mutex_enter(&E->lock);
   1403 
   1404 	/*
   1405 	 * If we still need entropy, can't read anything; if not, can
   1406 	 * read arbitrarily much.
   1407 	 */
   1408 	if (E->needed != 0) {
   1409 		ret = 0;
   1410 	} else {
   1411 		if (atomic_load_relaxed(&entropy_depletion))
   1412 			kn->kn_data = ENTROPY_CAPACITY*NBBY;
   1413 		else
   1414 			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
   1415 		ret = 1;
   1416 	}
   1417 
   1418 	/* Release the lock, if caller is outside entropy subsystem.  */
   1419 	if (hint == NOTE_SUBMIT)
   1420 		KASSERT(mutex_owned(&E->lock));
   1421 	else
   1422 		mutex_exit(&E->lock);
   1423 
   1424 	return ret;
   1425 }
   1426 
   1427 static const struct filterops entropy_read_filtops = {
   1428 	.f_isfd = 1,		/* XXX Makes sense only for /dev/u?random.  */
   1429 	.f_attach = NULL,
   1430 	.f_detach = filt_entropy_read_detach,
   1431 	.f_event = filt_entropy_read_event,
   1432 };
   1433 
   1434 /*
   1435  * entropy_kqfilter(kn)
   1436  *
   1437  *	Register kn to receive entropy event notifications.  May be
   1438  *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
   1439  */
   1440 int
   1441 entropy_kqfilter(struct knote *kn)
   1442 {
   1443 
   1444 	KASSERT(E->stage >= ENTROPY_WARM);
   1445 
   1446 	switch (kn->kn_filter) {
   1447 	case EVFILT_READ:
   1448 		/* Enter into the global select queue.  */
   1449 		mutex_enter(&E->lock);
   1450 		kn->kn_fop = &entropy_read_filtops;
   1451 		SLIST_INSERT_HEAD(&E->selq.sel_klist, kn, kn_selnext);
   1452 		mutex_exit(&E->lock);
   1453 		return 0;
   1454 	case EVFILT_WRITE:
   1455 		/* Can always dump entropy into the system.  */
   1456 		kn->kn_fop = &seltrue_filtops;
   1457 		return 0;
   1458 	default:
   1459 		return EINVAL;
   1460 	}
   1461 }
   1462 
   1463 /*
   1464  * rndsource_setcb(rs, get, getarg)
   1465  *
   1466  *	Set the request callback for the entropy source rs, if it can
   1467  *	provide entropy on demand.  Must precede rnd_attach_source.
   1468  */
   1469 void
   1470 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
   1471     void *getarg)
   1472 {
   1473 
   1474 	rs->get = get;
   1475 	rs->getarg = getarg;
   1476 }
   1477 
   1478 /*
   1479  * rnd_attach_source(rs, name, type, flags)
   1480  *
   1481  *	Attach the entropy source rs.  Must be done after
   1482  *	rndsource_setcb, if any, and before any calls to rnd_add_data.
   1483  */
   1484 void
   1485 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
   1486     uint32_t flags)
   1487 {
   1488 	uint32_t extra[4];
   1489 	unsigned i = 0;
   1490 
   1491 	/* Grab cycle counter to mix extra into the pool.  */
   1492 	extra[i++] = entropy_timer();
   1493 
   1494 	/*
   1495 	 * Apply some standard flags:
   1496 	 *
   1497 	 * - We do not bother with network devices by default, for
   1498 	 *   hysterical raisins (perhaps: because it is often the case
   1499 	 *   that an adversary can influence network packet timings).
   1500 	 */
   1501 	switch (type) {
   1502 	case RND_TYPE_NET:
   1503 		flags |= RND_FLAG_NO_COLLECT;
   1504 		break;
   1505 	}
   1506 
   1507 	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
   1508 	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
   1509 
   1510 	/* Initialize the random source.  */
   1511 	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
   1512 	strlcpy(rs->name, name, sizeof(rs->name));
   1513 	rs->total = 0;
   1514 	rs->type = type;
   1515 	rs->flags = flags;
   1516 	if (E->stage >= ENTROPY_WARM)
   1517 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
   1518 	extra[i++] = entropy_timer();
   1519 
   1520 	/* Wire it into the global list of random sources.  */
   1521 	if (E->stage >= ENTROPY_WARM)
   1522 		mutex_enter(&E->lock);
   1523 	LIST_INSERT_HEAD(&E->sources, rs, list);
   1524 	if (E->stage >= ENTROPY_WARM)
   1525 		mutex_exit(&E->lock);
   1526 	extra[i++] = entropy_timer();
   1527 
   1528 	/* Request that it provide entropy ASAP, if we can.  */
   1529 	if (ISSET(flags, RND_FLAG_HASCB))
   1530 		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
   1531 	extra[i++] = entropy_timer();
   1532 
   1533 	/* Mix the extra into the pool.  */
   1534 	KASSERT(i == __arraycount(extra));
   1535 	entropy_enter(extra, sizeof extra, 0);
   1536 	explicit_memset(extra, 0, sizeof extra);
   1537 }
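
/*
 * Example (hypothetical driver, for illustration only; "foo_softc",
 * "foo_read_sample", and "foo_attach" are made-up names): a device
 * that can produce entropy on demand sets its callback with
 * rndsource_setcb before calling rnd_attach_source, and the callback
 * feeds data back in with rnd_add_data.  RND_FLAG_DEFAULT comes from
 * <sys/rndsource.h>; the entropy estimate passed to rnd_add_data is a
 * placeholder here -- a real driver must justify its own claim.
 *
 *	static void
 *	foo_get(size_t nbytes, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *		uint32_t sample;
 *
 *		while (nbytes) {
 *			sample = foo_read_sample(sc);
 *			rnd_add_data(&sc->sc_rndsource, &sample,
 *			    sizeof sample, NBBY*sizeof sample);
 *			nbytes -= MIN(nbytes, sizeof sample);
 *		}
 *	}
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *
 *		rndsource_setcb(&sc->sc_rndsource, foo_get, sc);
 *		rnd_attach_source(&sc->sc_rndsource,
 *		    device_xname(sc->sc_dev), RND_TYPE_RNG,
 *		    RND_FLAG_DEFAULT|RND_FLAG_HASCB);
 *	}
 */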
   1538 
   1539 /*
   1540  * rnd_detach_source(rs)
   1541  *
   1542  *	Detach the entropy source rs.  May sleep waiting for users to
   1543  *	drain.  Further use is not allowed.
   1544  */
   1545 void
   1546 rnd_detach_source(struct krndsource *rs)
   1547 {
   1548 
   1549 	/*
   1550 	 * If we're cold (shouldn't happen, but hey), just remove it
   1551 	 * from the list -- there's nothing allocated.
   1552 	 */
   1553 	if (E->stage == ENTROPY_COLD) {
   1554 		LIST_REMOVE(rs, list);
   1555 		return;
   1556 	}
   1557 
   1558 	/* We may have to wait for entropy_request.  */
   1559 	ASSERT_SLEEPABLE();
   1560 
   1561 	/* Wait until the source list is not in use, and remove it.  */
   1562 	mutex_enter(&E->lock);
   1563 	while (E->sourcelock)
   1564 		cv_wait(&E->cv, &E->lock);
   1565 	LIST_REMOVE(rs, list);
   1566 	mutex_exit(&E->lock);
   1567 
   1568 	/* Free the per-CPU data.  */
   1569 	percpu_free(rs->state, sizeof(struct rndsource_cpu));
   1570 }
   1571 
   1572 /*
   1573  * rnd_lock_sources()
   1574  *
   1575  *	Prevent changes to the list of rndsources while we iterate it.
   1576  *	Interruptible.  Caller must hold the global entropy lock.  If
   1577  *	successful, no rndsource will go away until rnd_unlock_sources
   1578  *	even while the caller releases the global entropy lock.
   1579  */
   1580 static int
   1581 rnd_lock_sources(void)
   1582 {
   1583 	int error;
   1584 
   1585 	KASSERT(mutex_owned(&E->lock));
   1586 
   1587 	while (E->sourcelock) {
   1588 		error = cv_wait_sig(&E->cv, &E->lock);
   1589 		if (error)
   1590 			return error;
   1591 	}
   1592 
   1593 	E->sourcelock = curlwp;
   1594 	return 0;
   1595 }
   1596 
   1597 /*
   1598  * rnd_trylock_sources()
   1599  *
   1600  *	Try to lock the list of sources, but if it's already locked,
   1601  *	fail.  Caller must hold the global entropy lock.  If
   1602  *	successful, no rndsource will go away until rnd_unlock_sources
   1603  *	even while the caller releases the global entropy lock.
   1604  */
   1605 static bool
   1606 rnd_trylock_sources(void)
   1607 {
   1608 
   1609 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1610 
   1611 	if (E->sourcelock)
   1612 		return false;
   1613 	E->sourcelock = curlwp;
   1614 	return true;
   1615 }
   1616 
   1617 /*
   1618  * rnd_unlock_sources()
   1619  *
   1620  *	Unlock the list of sources after rnd_lock_sources or
   1621  *	rnd_trylock_sources.  Caller must hold the global entropy lock.
   1622  */
   1623 static void
   1624 rnd_unlock_sources(void)
   1625 {
   1626 
   1627 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1628 
   1629 	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
   1630 	    curlwp, E->sourcelock);
   1631 	E->sourcelock = NULL;
   1632 	if (E->stage >= ENTROPY_WARM)
   1633 		cv_broadcast(&E->cv);
   1634 }
   1635 
   1636 /*
   1637  * rnd_sources_locked()
   1638  *
   1639  *	True if we hold the list of rndsources locked, for diagnostic
   1640  *	assertions.
   1641  */
   1642 static bool __diagused
   1643 rnd_sources_locked(void)
   1644 {
   1645 
   1646 	return E->sourcelock == curlwp;
   1647 }
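
/*
 * The pattern these four routines support -- the same one used by
 * entropy_request below and by the RNDGETSRCNUM/RNDGETESTNUM/
 * RNDGETSRCNAME ioctls -- is, in sketch form:
 *
 *	mutex_enter(&E->lock);
 *	error = rnd_lock_sources();	(or rnd_trylock_sources())
 *	if (error) {
 *		mutex_exit(&E->lock);
 *		return error;
 *	}
 *	LIST_FOREACH(rs, &E->sources, list) {
 *		mutex_exit(&E->lock);
 *		...slow work with rs; it cannot be detached here...
 *		mutex_enter(&E->lock);
 *	}
 *	rnd_unlock_sources();
 *	mutex_exit(&E->lock);
 */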
   1648 
   1649 /*
   1650  * entropy_request(nbytes)
   1651  *
   1652  *	Request nbytes bytes of entropy from all sources in the system.
   1653  *	OK if we overdo it.  Caller must hold the global entropy lock;
   1654  *	will release and re-acquire it.
   1655  */
   1656 static void
   1657 entropy_request(size_t nbytes)
   1658 {
   1659 	struct krndsource *rs;
   1660 
   1661 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1662 
   1663 	/*
   1664 	 * If there is a request in progress, let it proceed.
   1665 	 * Otherwise, note that a request is in progress to avoid
   1666 	 * reentry and to block rnd_detach_source until we're done.
   1667 	 */
   1668 	if (!rnd_trylock_sources())
   1669 		return;
   1670 	entropy_request_evcnt.ev_count++;
   1671 
   1672 	/* Clamp to the maximum reasonable request.  */
   1673 	nbytes = MIN(nbytes, ENTROPY_CAPACITY);
   1674 
   1675 	/* Walk the list of sources.  */
   1676 	LIST_FOREACH(rs, &E->sources, list) {
   1677 		/* Skip sources without callbacks.  */
   1678 		if (!ISSET(rs->flags, RND_FLAG_HASCB))
   1679 			continue;
   1680 
   1681 		/* Drop the lock while we call the callback.  */
   1682 		if (E->stage >= ENTROPY_WARM)
   1683 			mutex_exit(&E->lock);
   1684 		(*rs->get)(nbytes, rs->getarg);
   1685 		if (E->stage >= ENTROPY_WARM)
   1686 			mutex_enter(&E->lock);
   1687 	}
   1688 
   1689 	/* Notify rnd_detach_source that the request is done.  */
   1690 	rnd_unlock_sources();
   1691 }
   1692 
   1693 /*
   1694  * rnd_add_uint32(rs, value)
   1695  *
   1696  *	Enter 32 bits of data from an entropy source into the pool.
   1697  *
   1698  *	If rs is NULL, may not be called from interrupt context.
   1699  *
   1700  *	If rs is non-NULL, may be called from any context.  May drop
   1701  *	data if called from interrupt context.
   1702  */
   1703 void
   1704 rnd_add_uint32(struct krndsource *rs, uint32_t value)
   1705 {
   1706 
   1707 	rnd_add_data(rs, &value, sizeof value, 0);
   1708 }
   1709 
   1710 void
   1711 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
   1712 {
   1713 
   1714 	rnd_add_data(rs, &value, sizeof value, 0);
   1715 }
   1716 
   1717 void
   1718 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
   1719 {
   1720 
   1721 	rnd_add_data(rs, &value, sizeof value, 0);
   1722 }
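
/*
 * Example (hypothetical, for illustration only; "bar_softc",
 * "bar_read_status", and "bar_intr" are made-up names): a source
 * with no on-demand callback, such as a device interrupt handler,
 * can feed a cheap 32-bit sample on each interrupt.  Most of the
 * usable entropy is expected to come from the sample's timing, which
 * the subsystem records itself when RND_FLAG_COLLECT_TIME is set, so
 * no entropy is claimed for the value itself.
 *
 *	static int
 *	bar_intr(void *arg)
 *	{
 *		struct bar_softc *sc = arg;
 *		uint32_t status;
 *
 *		status = bar_read_status(sc);
 *		...handle the interrupt...
 *		rnd_add_uint32(&sc->sc_rndsource, status);
 *		return 1;
 *	}
 */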
   1723 
   1724 /*
   1725  * rnd_add_data(rs, buf, len, entropybits)
   1726  *
   1727  *	Enter data from an entropy source into the pool, with a
   1728  *	driver's estimate of how much entropy the physical source of
   1729  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
   1730  *	estimate and treat it as zero.
   1731  *
   1732  *	If rs is NULL, may not be called from interrupt context.
   1733  *
   1734  *	If rs is non-NULL, may be called from any context.  May drop
   1735  *	data if called from interrupt context.
   1736  */
   1737 void
   1738 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
   1739     uint32_t entropybits)
   1740 {
   1741 	uint32_t extra;
   1742 	uint32_t flags;
   1743 
   1744 	KASSERTMSG(howmany(entropybits, NBBY) <= len,
   1745 	    "%s: impossible entropy rate:"
   1746 	    " %"PRIu32" bits in %"PRIu32"-byte string",
   1747 	    rs ? rs->name : "(anonymous)", entropybits, len);
   1748 
   1749 	/* If there's no rndsource, just enter the data and time now.  */
   1750 	if (rs == NULL) {
   1751 		entropy_enter(buf, len, entropybits);
   1752 		extra = entropy_timer();
   1753 		entropy_enter(&extra, sizeof extra, 0);
   1754 		explicit_memset(&extra, 0, sizeof extra);
   1755 		return;
   1756 	}
   1757 
   1758 	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
   1759 	flags = atomic_load_relaxed(&rs->flags);
   1760 
   1761 	/*
   1762 	 * Skip if:
   1763 	 * - we're not collecting entropy, or
   1764 	 * - the operator doesn't want to collect entropy from this, or
   1765 	 * - neither data nor timings are being collected from this.
   1766 	 */
   1767 	if (!atomic_load_relaxed(&entropy_collection) ||
   1768 	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
   1769 	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
   1770 		return;
   1771 
   1772 	/* If asked, ignore the estimate.  */
   1773 	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
   1774 		entropybits = 0;
   1775 
   1776 	/* If we are collecting data, enter them.  */
   1777 	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
   1778 		rnd_add_data_1(rs, buf, len, entropybits);
   1779 
   1780 	/* If we are collecting timings, enter one.  */
   1781 	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
   1782 		extra = entropy_timer();
   1783 		rnd_add_data_1(rs, &extra, sizeof extra, 0);
   1784 	}
   1785 }
   1786 
   1787 /*
   1788  * rnd_add_data_1(rs, buf, len, entropybits)
   1789  *
   1790  *	Internal subroutine to call either entropy_enter_intr, if we're
   1791  *	in interrupt context, or entropy_enter if not, and to count the
   1792  *	entropy in an rndsource.
   1793  */
   1794 static void
   1795 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
   1796     uint32_t entropybits)
   1797 {
   1798 	bool fullyused;
   1799 
   1800 	/*
   1801 	 * If we're in interrupt context, use entropy_enter_intr and
   1802 	 * take note of whether it consumed the full sample; if not,
   1803 	 * use entropy_enter, which always consumes the full sample.
   1804 	 */
   1805 	if (curlwp && cpu_intr_p()) {
   1806 		fullyused = entropy_enter_intr(buf, len, entropybits);
   1807 	} else {
   1808 		entropy_enter(buf, len, entropybits);
   1809 		fullyused = true;
   1810 	}
   1811 
   1812 	/*
   1813 	 * If we used the full sample, note how many bits were
   1814 	 * contributed from this source.
   1815 	 */
   1816 	if (fullyused) {
   1817 		if (E->stage < ENTROPY_HOT) {
   1818 			if (E->stage >= ENTROPY_WARM)
   1819 				mutex_enter(&E->lock);
   1820 			rs->total += MIN(UINT_MAX - rs->total, entropybits);
   1821 			if (E->stage >= ENTROPY_WARM)
   1822 				mutex_exit(&E->lock);
   1823 		} else {
   1824 			struct rndsource_cpu *rc = percpu_getref(rs->state);
   1825 			unsigned nbits = rc->rc_nbits;
   1826 
   1827 			nbits += MIN(UINT_MAX - nbits, entropybits);
   1828 			atomic_store_relaxed(&rc->rc_nbits, nbits);
   1829 			percpu_putref(rs->state);
   1830 		}
   1831 	}
   1832 }
   1833 
   1834 /*
   1835  * rnd_add_data_sync(rs, buf, len, entropybits)
   1836  *
   1837  *	Same as rnd_add_data.  Originally used in rndsource callbacks,
   1838  *	to break an unnecessary cycle; no longer really needed.
   1839  */
   1840 void
   1841 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
   1842     uint32_t entropybits)
   1843 {
   1844 
   1845 	rnd_add_data(rs, buf, len, entropybits);
   1846 }
   1847 
   1848 /*
   1849  * rndsource_entropybits(rs)
   1850  *
   1851  *	Return approximately the number of bits of entropy that have
   1852  *	been contributed via rs so far.  Approximate if other CPUs may
   1853  *	be calling rnd_add_data concurrently.
   1854  */
   1855 static unsigned
   1856 rndsource_entropybits(struct krndsource *rs)
   1857 {
   1858 	unsigned nbits = rs->total;
   1859 
   1860 	KASSERT(E->stage >= ENTROPY_WARM);
   1861 	KASSERT(rnd_sources_locked());
   1862 	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
   1863 	return nbits;
   1864 }
   1865 
   1866 static void
   1867 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
   1868 {
   1869 	struct rndsource_cpu *rc = ptr;
   1870 	unsigned *nbitsp = cookie;
   1871 	unsigned cpu_nbits;
   1872 
   1873 	cpu_nbits = atomic_load_relaxed(&rc->rc_nbits);
   1874 	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
   1875 }
   1876 
   1877 /*
   1878  * rndsource_to_user(rs, urs)
   1879  *
   1880  *	Copy a description of rs out to urs for userland.
   1881  */
   1882 static void
   1883 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
   1884 {
   1885 
   1886 	KASSERT(E->stage >= ENTROPY_WARM);
   1887 	KASSERT(rnd_sources_locked());
   1888 
   1889 	/* Avoid kernel memory disclosure.  */
   1890 	memset(urs, 0, sizeof(*urs));
   1891 
   1892 	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
   1893 	strlcpy(urs->name, rs->name, sizeof(urs->name));
   1894 	urs->total = rndsource_entropybits(rs);
   1895 	urs->type = rs->type;
   1896 	urs->flags = atomic_load_relaxed(&rs->flags);
   1897 }
   1898 
   1899 /*
   1900  * rndsource_to_user_est(rs, urse)
   1901  *
   1902  *	Copy a description of rs and estimation statistics out to urse
   1903  *	for userland.
   1904  */
   1905 static void
   1906 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
   1907 {
   1908 
   1909 	KASSERT(E->stage >= ENTROPY_WARM);
   1910 	KASSERT(rnd_sources_locked());
   1911 
   1912 	/* Avoid kernel memory disclosure.  */
   1913 	memset(urse, 0, sizeof(*urse));
   1914 
   1915 	/* Copy out the rndsource description.  */
   1916 	rndsource_to_user(rs, &urse->rt);
   1917 
   1918 	/* Zero out the statistics because we don't do estimation.  */
   1919 	urse->dt_samples = 0;
   1920 	urse->dt_total = 0;
   1921 	urse->dv_samples = 0;
   1922 	urse->dv_total = 0;
   1923 }
   1924 
   1925 /*
   1926  * entropy_ioctl(cmd, data)
   1927  *
   1928  *	Handle various /dev/random ioctl queries.
   1929  */
   1930 int
   1931 entropy_ioctl(unsigned long cmd, void *data)
   1932 {
   1933 	struct krndsource *rs;
    1934 	bool privileged = false;
   1935 	int error;
   1936 
   1937 	KASSERT(E->stage >= ENTROPY_WARM);
   1938 
   1939 	/* Verify user's authorization to perform the ioctl.  */
   1940 	switch (cmd) {
   1941 	case RNDGETENTCNT:
   1942 	case RNDGETPOOLSTAT:
   1943 	case RNDGETSRCNUM:
   1944 	case RNDGETSRCNAME:
   1945 	case RNDGETESTNUM:
   1946 	case RNDGETESTNAME:
   1947 		error = kauth_authorize_device(curlwp->l_cred,
   1948 		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
   1949 		break;
   1950 	case RNDCTL:
   1951 		error = kauth_authorize_device(curlwp->l_cred,
   1952 		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
   1953 		break;
   1954 	case RNDADDDATA:
   1955 		error = kauth_authorize_device(curlwp->l_cred,
   1956 		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
   1957 		/* Ascertain whether the user's inputs should be counted.  */
   1958 		if (kauth_authorize_device(curlwp->l_cred,
   1959 			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
   1960 			NULL, NULL, NULL, NULL) == 0)
   1961 			privileged = true;
   1962 		break;
   1963 	default: {
   1964 		/*
   1965 		 * XXX Hack to avoid changing module ABI so this can be
   1966 		 * pulled up.  Later, we can just remove the argument.
   1967 		 */
   1968 		static const struct fileops fops = {
   1969 			.fo_ioctl = rnd_system_ioctl,
   1970 		};
   1971 		struct file f = {
   1972 			.f_ops = &fops,
   1973 		};
   1974 		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
   1975 		    enosys(), error);
   1976 #if defined(_LP64)
   1977 		if (error == ENOSYS)
   1978 			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
   1979 			    enosys(), error);
   1980 #endif
   1981 		if (error == ENOSYS)
   1982 			error = ENOTTY;
   1983 		break;
   1984 	}
   1985 	}
   1986 
   1987 	/* If anything went wrong with authorization, stop here.  */
   1988 	if (error)
   1989 		return error;
   1990 
   1991 	/* Dispatch on the command.  */
   1992 	switch (cmd) {
   1993 	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
   1994 		uint32_t *countp = data;
   1995 
   1996 		mutex_enter(&E->lock);
   1997 		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
   1998 		mutex_exit(&E->lock);
   1999 
   2000 		break;
   2001 	}
   2002 	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
   2003 		rndpoolstat_t *pstat = data;
   2004 
   2005 		mutex_enter(&E->lock);
   2006 
   2007 		/* parameters */
   2008 		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
   2009 		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
   2010 		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
   2011 
   2012 		/* state */
   2013 		pstat->added = 0; /* XXX total entropy_enter count */
   2014 		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
   2015 		pstat->removed = 0; /* XXX total entropy_extract count */
   2016 		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
   2017 		pstat->generated = 0; /* XXX bits of data...fabricated? */
   2018 
   2019 		mutex_exit(&E->lock);
   2020 		break;
   2021 	}
   2022 	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
   2023 		rndstat_t *stat = data;
   2024 		uint32_t start = 0, i = 0;
   2025 
   2026 		/* Skip if none requested; fail if too many requested.  */
   2027 		if (stat->count == 0)
   2028 			break;
   2029 		if (stat->count > RND_MAXSTATCOUNT)
   2030 			return EINVAL;
   2031 
   2032 		/*
   2033 		 * Under the lock, find the first one, copy out as many
   2034 		 * as requested, and report how many we copied out.
   2035 		 */
   2036 		mutex_enter(&E->lock);
   2037 		error = rnd_lock_sources();
   2038 		if (error) {
   2039 			mutex_exit(&E->lock);
   2040 			return error;
   2041 		}
   2042 		LIST_FOREACH(rs, &E->sources, list) {
   2043 			if (start++ == stat->start)
   2044 				break;
   2045 		}
   2046 		while (i < stat->count && rs != NULL) {
   2047 			mutex_exit(&E->lock);
   2048 			rndsource_to_user(rs, &stat->source[i++]);
   2049 			mutex_enter(&E->lock);
   2050 			rs = LIST_NEXT(rs, list);
   2051 		}
   2052 		KASSERT(i <= stat->count);
   2053 		stat->count = i;
   2054 		rnd_unlock_sources();
   2055 		mutex_exit(&E->lock);
   2056 		break;
   2057 	}
   2058 	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
   2059 		rndstat_est_t *estat = data;
   2060 		uint32_t start = 0, i = 0;
   2061 
   2062 		/* Skip if none requested; fail if too many requested.  */
   2063 		if (estat->count == 0)
   2064 			break;
   2065 		if (estat->count > RND_MAXSTATCOUNT)
   2066 			return EINVAL;
   2067 
   2068 		/*
   2069 		 * Under the lock, find the first one, copy out as many
   2070 		 * as requested, and report how many we copied out.
   2071 		 */
   2072 		mutex_enter(&E->lock);
   2073 		error = rnd_lock_sources();
   2074 		if (error) {
   2075 			mutex_exit(&E->lock);
   2076 			return error;
   2077 		}
   2078 		LIST_FOREACH(rs, &E->sources, list) {
   2079 			if (start++ == estat->start)
   2080 				break;
   2081 		}
   2082 		while (i < estat->count && rs != NULL) {
   2083 			mutex_exit(&E->lock);
   2084 			rndsource_to_user_est(rs, &estat->source[i++]);
   2085 			mutex_enter(&E->lock);
   2086 			rs = LIST_NEXT(rs, list);
   2087 		}
   2088 		KASSERT(i <= estat->count);
   2089 		estat->count = i;
   2090 		rnd_unlock_sources();
   2091 		mutex_exit(&E->lock);
   2092 		break;
   2093 	}
   2094 	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
   2095 		rndstat_name_t *nstat = data;
   2096 		const size_t n = sizeof(rs->name);
   2097 
   2098 		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
   2099 
   2100 		/*
   2101 		 * Under the lock, search by name.  If found, copy it
   2102 		 * out; if not found, fail with ENOENT.
   2103 		 */
   2104 		mutex_enter(&E->lock);
   2105 		error = rnd_lock_sources();
   2106 		if (error) {
   2107 			mutex_exit(&E->lock);
   2108 			return error;
   2109 		}
   2110 		LIST_FOREACH(rs, &E->sources, list) {
   2111 			if (strncmp(rs->name, nstat->name, n) == 0)
   2112 				break;
   2113 		}
   2114 		if (rs != NULL) {
   2115 			mutex_exit(&E->lock);
   2116 			rndsource_to_user(rs, &nstat->source);
   2117 			mutex_enter(&E->lock);
   2118 		} else {
   2119 			error = ENOENT;
   2120 		}
   2121 		rnd_unlock_sources();
   2122 		mutex_exit(&E->lock);
   2123 		break;
   2124 	}
   2125 	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
   2126 		rndstat_est_name_t *enstat = data;
   2127 		const size_t n = sizeof(rs->name);
   2128 
   2129 		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
   2130 
   2131 		/*
   2132 		 * Under the lock, search by name.  If found, copy it
   2133 		 * out; if not found, fail with ENOENT.
   2134 		 */
   2135 		mutex_enter(&E->lock);
   2136 		error = rnd_lock_sources();
   2137 		if (error) {
   2138 			mutex_exit(&E->lock);
   2139 			return error;
   2140 		}
   2141 		LIST_FOREACH(rs, &E->sources, list) {
   2142 			if (strncmp(rs->name, enstat->name, n) == 0)
   2143 				break;
   2144 		}
   2145 		if (rs != NULL) {
   2146 			mutex_exit(&E->lock);
   2147 			rndsource_to_user_est(rs, &enstat->source);
   2148 			mutex_enter(&E->lock);
   2149 		} else {
   2150 			error = ENOENT;
   2151 		}
   2152 		rnd_unlock_sources();
   2153 		mutex_exit(&E->lock);
   2154 		break;
   2155 	}
   2156 	case RNDCTL: {		/* Modify entropy source flags.  */
   2157 		rndctl_t *rndctl = data;
   2158 		const size_t n = sizeof(rs->name);
   2159 		uint32_t flags;
   2160 
   2161 		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
   2162 
    2163 		/* Whitelist the flags that the user can change.  */
   2164 		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
   2165 
   2166 		/*
   2167 		 * For each matching rndsource, either by type if
   2168 		 * specified or by name if not, set the masked flags.
   2169 		 */
   2170 		mutex_enter(&E->lock);
   2171 		LIST_FOREACH(rs, &E->sources, list) {
   2172 			if (rndctl->type != 0xff) {
   2173 				if (rs->type != rndctl->type)
   2174 					continue;
   2175 			} else {
   2176 				if (strncmp(rs->name, rndctl->name, n) != 0)
   2177 					continue;
   2178 			}
   2179 			flags = rs->flags & ~rndctl->mask;
   2180 			flags |= rndctl->flags & rndctl->mask;
   2181 			atomic_store_relaxed(&rs->flags, flags);
   2182 		}
   2183 		mutex_exit(&E->lock);
   2184 		break;
   2185 	}
   2186 	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
   2187 		rnddata_t *rdata = data;
   2188 		unsigned entropybits = 0;
   2189 
   2190 		if (!atomic_load_relaxed(&entropy_collection))
   2191 			break;	/* thanks but no thanks */
   2192 		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
   2193 			return EINVAL;
   2194 
   2195 		/*
    2196 		 * This ioctl serves as the userland alternative to a
   2197 		 * bootloader-provided seed -- typically furnished by
   2198 		 * /etc/rc.d/random_seed.  We accept the user's entropy
   2199 		 * claim only if
   2200 		 *
   2201 		 * (a) the user is privileged, and
    2202 		 * (b) we have not entered a bootloader seed,
   2203 		 *
   2204 		 * under the assumption that the user may use this to
   2205 		 * load a seed from disk that we have already loaded
   2206 		 * from the bootloader, so we don't double-count it.
   2207 		 */
   2208 		if (privileged && rdata->entropy && rdata->len) {
   2209 			mutex_enter(&E->lock);
   2210 			if (!E->seeded) {
   2211 				entropybits = MIN(rdata->entropy,
   2212 				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
   2213 				E->seeded = true;
   2214 			}
   2215 			mutex_exit(&E->lock);
   2216 		}
   2217 
   2218 		/* Enter the data and consolidate entropy.  */
   2219 		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
   2220 		    entropybits);
   2221 		entropy_consolidate();
   2222 		break;
   2223 	}
   2224 	default:
   2225 		error = ENOTTY;
   2226 	}
   2227 
   2228 	/* Return any error that may have come up.  */
   2229 	return error;
   2230 }
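
/*
 * Example (userland, illustrative only; not part of this file): the
 * commands above are normally reached through ioctls on /dev/urandom
 * using the structures from <sys/rndio.h>.  A sketch, with error
 * handling elided:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/rndio.h>
 *	#include <fcntl.h>
 *
 *	uint32_t count;
 *	rnddata_t rd;
 *	int fd = open("/dev/urandom", O_RDWR);
 *
 *	ioctl(fd, RNDGETENTCNT, &count);	(current entropy, in bits)
 *
 *	rd.len = 32;				(fill rd.data first)
 *	rd.entropy = 0;				(claim no entropy)
 *	ioctl(fd, RNDADDDATA, &rd);		(requires privilege)
 */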
   2231 
   2232 /* Legacy entry points */
   2233 
   2234 void
   2235 rnd_seed(void *seed, size_t len)
   2236 {
   2237 
   2238 	if (len != sizeof(rndsave_t)) {
   2239 		printf("entropy: invalid seed length: %zu,"
   2240 		    " expected sizeof(rndsave_t) = %zu\n",
   2241 		    len, sizeof(rndsave_t));
   2242 		return;
   2243 	}
   2244 	entropy_seed(seed);
   2245 }
   2246 
   2247 void
   2248 rnd_init(void)
   2249 {
   2250 
   2251 	entropy_init();
   2252 }
   2253 
   2254 void
   2255 rnd_init_softint(void)
   2256 {
   2257 
   2258 	entropy_init_late();
   2259 }
   2260 
   2261 int
   2262 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
   2263 {
   2264 
   2265 	return entropy_ioctl(cmd, data);
   2266 }
   2267