      1 /*	$NetBSD: kern_entropy.c,v 1.26 2021/01/11 02:18:40 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Entropy subsystem
     34  *
     35  *	* Each CPU maintains a per-CPU entropy pool so that gathering
     36  *	  entropy requires no interprocessor synchronization, except
     37  *	  early at boot when we may be scrambling to gather entropy as
     38  *	  soon as possible.
     39  *
     40  *	  - entropy_enter gathers entropy and never drops it on the
     41  *	    floor, at the cost of sometimes having to do cryptography.
     42  *
     43  *	  - entropy_enter_intr gathers entropy or drops it on the
     44  *	    floor, with low latency.  Work to stir the pool or kick the
     45  *	    housekeeping thread is scheduled in soft interrupts.
     46  *
     47  *	* entropy_enter immediately enters into the global pool if it
      48  *	  can transition to full entropy in one fell swoop.  Otherwise,
     49  *	  it defers to a housekeeping thread that consolidates entropy,
     50  *	  but only when the CPUs collectively have full entropy, in
     51  *	  order to mitigate iterative-guessing attacks.
     52  *
     53  *	* The entropy housekeeping thread continues to consolidate
     54  *	  entropy even after we think we have full entropy, in case we
     55  *	  are wrong, but is limited to one discretionary consolidation
     56  *	  per minute, and only when new entropy is actually coming in,
     57  *	  to limit performance impact.
     58  *
     59  *	* The entropy epoch is the number that changes when we
     60  *	  transition from partial entropy to full entropy, so that
     61  *	  users can easily determine when to reseed.  This also
     62  *	  facilitates an operator explicitly causing everything to
     63  *	  reseed by sysctl -w kern.entropy.consolidate=1.
     64  *
     65  *	* No entropy estimation based on the sample values, which is a
     66  *	  contradiction in terms and a potential source of side
     67  *	  channels.  It is the responsibility of the driver author to
     68  *	  study how predictable the physical source of input can ever
     69  *	  be, and to furnish a lower bound on the amount of entropy it
     70  *	  has.
     71  *
     72  *	* Entropy depletion is available for testing (or if you're into
     73  *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
     74  *	  the logic to support it is small, to minimize chance of bugs.
     75  */
     76 
     77 #include <sys/cdefs.h>
     78 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.26 2021/01/11 02:18:40 riastradh Exp $");
     79 
     80 #include <sys/param.h>
     81 #include <sys/types.h>
     82 #include <sys/atomic.h>
     83 #include <sys/compat_stub.h>
     84 #include <sys/condvar.h>
     85 #include <sys/cpu.h>
     86 #include <sys/entropy.h>
     87 #include <sys/errno.h>
     88 #include <sys/evcnt.h>
     89 #include <sys/event.h>
     90 #include <sys/file.h>
     91 #include <sys/intr.h>
     92 #include <sys/kauth.h>
     93 #include <sys/kernel.h>
     94 #include <sys/kmem.h>
     95 #include <sys/kthread.h>
     96 #include <sys/module_hook.h>
     97 #include <sys/mutex.h>
     98 #include <sys/percpu.h>
     99 #include <sys/poll.h>
    100 #include <sys/queue.h>
    101 #include <sys/rnd.h>		/* legacy kernel API */
    102 #include <sys/rndio.h>		/* userland ioctl interface */
    103 #include <sys/rndsource.h>	/* kernel rndsource driver API */
    104 #include <sys/select.h>
    105 #include <sys/selinfo.h>
    106 #include <sys/sha1.h>		/* for boot seed checksum */
    107 #include <sys/stdint.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/syslog.h>
    110 #include <sys/systm.h>
    111 #include <sys/time.h>
    112 #include <sys/xcall.h>
    113 
    114 #include <lib/libkern/entpool.h>
    115 
    116 #include <machine/limits.h>
    117 
    118 #ifdef __HAVE_CPU_COUNTER
    119 #include <machine/cpu_counter.h>
    120 #endif
    121 
    122 /*
    123  * struct entropy_cpu
    124  *
    125  *	Per-CPU entropy state.  The pool is allocated separately
    126  *	because percpu(9) sometimes moves per-CPU objects around
    127  *	without zeroing them, which would lead to unwanted copies of
     128  *	sensitive secrets.  The evcnt is allocated separately because
    129  *	evcnt(9) assumes it stays put in memory.
    130  */
    131 struct entropy_cpu {
    132 	struct evcnt		*ec_softint_evcnt;
    133 	struct entpool		*ec_pool;
    134 	unsigned		ec_pending;
    135 	bool			ec_locked;
    136 };
    137 
    138 /*
    139  * struct rndsource_cpu
    140  *
    141  *	Per-CPU rndsource state.
    142  */
    143 struct rndsource_cpu {
    144 	unsigned		rc_nbits; /* bits of entropy added */
    145 };
    146 
    147 /*
    148  * entropy_global (a.k.a. E for short in this file)
    149  *
    150  *	Global entropy state.  Writes protected by the global lock.
    151  *	Some fields, marked (A), can be read outside the lock, and are
    152  *	maintained with atomic_load/store_relaxed.
    153  */
    154 struct {
    155 	kmutex_t	lock;		/* covers all global state */
    156 	struct entpool	pool;		/* global pool for extraction */
    157 	unsigned	needed;		/* (A) needed globally */
    158 	unsigned	pending;	/* (A) pending in per-CPU pools */
    159 	unsigned	timestamp;	/* (A) time of last consolidation */
    160 	unsigned	epoch;		/* (A) changes when needed -> 0 */
    161 	kcondvar_t	cv;		/* notifies state changes */
    162 	struct selinfo	selq;		/* notifies needed -> 0 */
    163 	struct lwp	*sourcelock;	/* lock on list of sources */
    164 	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
    165 	enum entropy_stage {
    166 		ENTROPY_COLD = 0, /* single-threaded */
    167 		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
    168 		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
    169 	}		stage;
    170 	bool		consolidate;	/* kick thread to consolidate */
    171 	bool		seed_rndsource;	/* true if seed source is attached */
    172 	bool		seeded;		/* true if seed file already loaded */
    173 } entropy_global __cacheline_aligned = {
    174 	/* Fields that must be initialized when the kernel is loaded.  */
    175 	.needed = ENTROPY_CAPACITY*NBBY,
    176 	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
    177 	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
    178 	.stage = ENTROPY_COLD,
    179 };
    180 
    181 #define	E	(&entropy_global)	/* declutter */
    182 
    183 /* Read-mostly globals */
    184 static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
    185 static void		*entropy_sih __read_mostly; /* softint handler */
    186 static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */
    187 
    188 int rnd_initial_entropy __read_mostly; /* XXX legacy */
    189 
    190 static struct krndsource seed_rndsource __read_mostly;
    191 
    192 /*
    193  * Event counters
    194  *
    195  *	Must be careful with adding these because they can serve as
    196  *	side channels.
    197  */
    198 static struct evcnt entropy_discretionary_evcnt =
    199     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
    200 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
    201 static struct evcnt entropy_immediate_evcnt =
    202     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
    203 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
    204 static struct evcnt entropy_partial_evcnt =
    205     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
    206 EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
    207 static struct evcnt entropy_consolidate_evcnt =
    208     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
    209 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
    210 static struct evcnt entropy_extract_intr_evcnt =
    211     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr");
    212 EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt);
    213 static struct evcnt entropy_extract_fail_evcnt =
    214     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
    215 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
    216 static struct evcnt entropy_request_evcnt =
    217     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
    218 EVCNT_ATTACH_STATIC(entropy_request_evcnt);
    219 static struct evcnt entropy_deplete_evcnt =
    220     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
    221 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
    222 static struct evcnt entropy_notify_evcnt =
    223     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
    224 EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
    225 
    226 /* Sysctl knobs */
    227 static bool	entropy_collection = 1;
    228 static bool	entropy_depletion = 0; /* Silly!  */
    229 
    230 static const struct sysctlnode	*entropy_sysctlroot;
    231 static struct sysctllog		*entropy_sysctllog;
    232 
    233 /* Forward declarations */
    234 static void	entropy_init_cpu(void *, void *, struct cpu_info *);
    235 static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
    236 static void	entropy_account_cpu(struct entropy_cpu *);
    237 static void	entropy_enter(const void *, size_t, unsigned);
    238 static bool	entropy_enter_intr(const void *, size_t, unsigned);
    239 static void	entropy_softintr(void *);
    240 static void	entropy_thread(void *);
    241 static uint32_t	entropy_pending(void);
    242 static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
    243 static void	entropy_do_consolidate(void);
    244 static void	entropy_consolidate_xc(void *, void *);
    245 static void	entropy_notify(void);
    246 static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
    247 static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
    248 static void	filt_entropy_read_detach(struct knote *);
    249 static int	filt_entropy_read_event(struct knote *, long);
    250 static void	entropy_request(size_t);
    251 static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
    252 		    uint32_t);
    253 static unsigned	rndsource_entropybits(struct krndsource *);
    254 static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
    255 static void	rndsource_to_user(struct krndsource *, rndsource_t *);
    256 static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
    257 
    258 /*
    259  * entropy_timer()
    260  *
    261  *	Cycle counter, time counter, or anything that changes a wee bit
    262  *	unpredictably.
    263  */
    264 static inline uint32_t
    265 entropy_timer(void)
    266 {
    267 	struct bintime bt;
    268 	uint32_t v;
    269 
    270 	/* If we have a CPU cycle counter, use the low 32 bits.  */
    271 #ifdef __HAVE_CPU_COUNTER
    272 	if (__predict_true(cpu_hascounter()))
    273 		return cpu_counter32();
    274 #endif	/* __HAVE_CPU_COUNTER */
    275 
    276 	/* If we're cold, tough.  Can't binuptime while cold.  */
    277 	if (__predict_false(cold))
    278 		return 0;
    279 
    280 	/* Fold the 128 bits of binuptime into 32 bits.  */
    281 	binuptime(&bt);
    282 	v = bt.frac;
    283 	v ^= bt.frac >> 32;
    284 	v ^= bt.sec;
    285 	v ^= bt.sec >> 32;
    286 	return v;
    287 }
    288 
    289 static void
    290 attach_seed_rndsource(void)
    291 {
    292 
    293 	/*
    294 	 * First called no later than entropy_init, while we are still
    295 	 * single-threaded, so no need for RUN_ONCE.
    296 	 */
    297 	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
    298 		return;
    299 	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
    300 	    RND_FLAG_COLLECT_VALUE);
    301 	E->seed_rndsource = true;
    302 }
    303 
    304 /*
    305  * entropy_init()
    306  *
    307  *	Initialize the entropy subsystem.  Panic on failure.
    308  *
    309  *	Requires percpu(9) and sysctl(9) to be initialized.
    310  */
    311 static void
    312 entropy_init(void)
    313 {
    314 	uint32_t extra[2];
    315 	struct krndsource *rs;
    316 	unsigned i = 0;
    317 
    318 	KASSERT(E->stage == ENTROPY_COLD);
    319 
    320 	/* Grab some cycle counts early at boot.  */
    321 	extra[i++] = entropy_timer();
    322 
    323 	/* Run the entropy pool cryptography self-test.  */
    324 	if (entpool_selftest() == -1)
    325 		panic("entropy pool crypto self-test failed");
    326 
    327 	/* Create the sysctl directory.  */
    328 	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
    329 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
    330 	    SYSCTL_DESCR("Entropy (random number sources) options"),
    331 	    NULL, 0, NULL, 0,
    332 	    CTL_KERN, CTL_CREATE, CTL_EOL);
    333 
    334 	/* Create the sysctl knobs.  */
    335 	/* XXX These shouldn't be writable at securelevel>0.  */
    336 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    337 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
    338 	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
    339 	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
    340 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    341 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
    342 	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
    343 	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
    344 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    345 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
    346 	    SYSCTL_DESCR("Trigger entropy consolidation now"),
    347 	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
    348 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    349 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
    350 	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
    351 	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
    352 	/* XXX These should maybe not be readable at securelevel>0.  */
    353 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    354 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
    355 	    "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
    356 	    NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
    357 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    358 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
    359 	    "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
    360 	    NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
    361 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
    362 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
    363 	    "epoch", SYSCTL_DESCR("Entropy epoch"),
    364 	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
    365 
    366 	/* Initialize the global state for multithreaded operation.  */
    367 	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM);
    368 	cv_init(&E->cv, "entropy");
    369 	selinit(&E->selq);
    370 
    371 	/* Make sure the seed source is attached.  */
    372 	attach_seed_rndsource();
    373 
    374 	/* Note if the bootloader didn't provide a seed.  */
    375 	if (!E->seeded)
    376 		printf("entropy: no seed from bootloader\n");
    377 
    378 	/* Allocate the per-CPU records for all early entropy sources.  */
    379 	LIST_FOREACH(rs, &E->sources, list)
    380 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
    381 
    382 	/* Enter the boot cycle count to get started.  */
    383 	extra[i++] = entropy_timer();
    384 	KASSERT(i == __arraycount(extra));
    385 	entropy_enter(extra, sizeof extra, 0);
    386 	explicit_memset(extra, 0, sizeof extra);
    387 
    388 	/* We are now ready for multi-threaded operation.  */
    389 	E->stage = ENTROPY_WARM;
    390 }
    391 
    392 /*
    393  * entropy_init_late()
    394  *
    395  *	Late initialization.  Panic on failure.
    396  *
    397  *	Requires CPUs to have been detected and LWPs to have started.
    398  */
    399 static void
    400 entropy_init_late(void)
    401 {
    402 	int error;
    403 
    404 	KASSERT(E->stage == ENTROPY_WARM);
    405 
    406 	/* Allocate and initialize the per-CPU state.  */
    407 	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
    408 	    entropy_init_cpu, entropy_fini_cpu, NULL);
    409 
    410 	/*
    411 	 * Establish the softint at the highest softint priority level.
    412 	 * Must happen after CPU detection.
    413 	 */
    414 	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
    415 	    &entropy_softintr, NULL);
    416 	if (entropy_sih == NULL)
    417 		panic("unable to establish entropy softint");
    418 
    419 	/*
    420 	 * Create the entropy housekeeping thread.  Must happen after
    421 	 * lwpinit.
    422 	 */
    423 	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
    424 	    entropy_thread, NULL, &entropy_lwp, "entbutler");
    425 	if (error)
    426 		panic("unable to create entropy housekeeping thread: %d",
    427 		    error);
    428 
    429 	/*
    430 	 * Wait until the per-CPU initialization has hit all CPUs
    431 	 * before proceeding to mark the entropy system hot.
    432 	 */
    433 	xc_barrier(XC_HIGHPRI);
    434 	E->stage = ENTROPY_HOT;
    435 }
    436 
    437 /*
    438  * entropy_init_cpu(ptr, cookie, ci)
    439  *
    440  *	percpu(9) constructor for per-CPU entropy pool.
    441  */
    442 static void
    443 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    444 {
    445 	struct entropy_cpu *ec = ptr;
    446 
    447 	ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt),
    448 	    KM_SLEEP);
    449 	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
    450 	ec->ec_pending = 0;
    451 	ec->ec_locked = false;
    452 
    453 	evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
    454 	    ci->ci_cpuname, "entropy softint");
    455 }
    456 
    457 /*
    458  * entropy_fini_cpu(ptr, cookie, ci)
    459  *
    460  *	percpu(9) destructor for per-CPU entropy pool.
    461  */
    462 static void
    463 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    464 {
    465 	struct entropy_cpu *ec = ptr;
    466 
    467 	/*
    468 	 * Zero any lingering data.  Disclosure of the per-CPU pool
    469 	 * shouldn't retroactively affect the security of any keys
    470 	 * generated, because entpool(9) erases whatever we have just
    471 	 * drawn out of any pool, but better safe than sorry.
    472 	 */
    473 	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
    474 
    475 	evcnt_detach(ec->ec_softint_evcnt);
    476 
    477 	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
    478 	kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt));
    479 }
    480 
    481 /*
    482  * entropy_seed(seed)
    483  *
    484  *	Seed the entropy pool with seed.  Meant to be called as early
    485  *	as possible by the bootloader; may be called before or after
    486  *	entropy_init.  Must be called before system reaches userland.
    487  *	Must be called in thread or soft interrupt context, not in hard
    488  *	interrupt context.  Must be called at most once.
    489  *
    490  *	Overwrites the seed in place.  Caller may then free the memory.
    491  */
    492 static void
    493 entropy_seed(rndsave_t *seed)
    494 {
    495 	SHA1_CTX ctx;
    496 	uint8_t digest[SHA1_DIGEST_LENGTH];
    497 	bool seeded;
    498 
    499 	/*
    500 	 * Verify the checksum.  If the checksum fails, take the data
    501 	 * but ignore the entropy estimate -- the file may have been
    502 	 * incompletely written with garbage, which is harmless to add
    503 	 * but may not be as unpredictable as alleged.
    504 	 */
    505 	SHA1Init(&ctx);
    506 	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
    507 	SHA1Update(&ctx, seed->data, sizeof(seed->data));
    508 	SHA1Final(digest, &ctx);
    509 	CTASSERT(sizeof(seed->digest) == sizeof(digest));
    510 	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
    511 		printf("entropy: invalid seed checksum\n");
    512 		seed->entropy = 0;
    513 	}
    514 	explicit_memset(&ctx, 0, sizeof ctx);
    515 	explicit_memset(digest, 0, sizeof digest);
    516 
    517 	/*
     518 	 * If the entropy is implausibly large, try byte-swapping.
    519 	 * Otherwise assume the file is corrupted and act as though it
    520 	 * has zero entropy.
    521 	 */
    522 	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
    523 		seed->entropy = bswap32(seed->entropy);
    524 		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
    525 			seed->entropy = 0;
    526 	}
    527 
    528 	/* Make sure the seed source is attached.  */
    529 	attach_seed_rndsource();
    530 
    531 	/* Test and set E->seeded.  */
    532 	if (E->stage >= ENTROPY_WARM)
    533 		mutex_enter(&E->lock);
    534 	seeded = E->seeded;
    535 	E->seeded = (seed->entropy > 0);
    536 	if (E->stage >= ENTROPY_WARM)
    537 		mutex_exit(&E->lock);
    538 
    539 	/*
     540 	 * If we've been seeded, we may be re-entering the same seed
    541 	 * (e.g., bootloader vs module init, or something).  No harm in
    542 	 * entering it twice, but it contributes no additional entropy.
    543 	 */
    544 	if (seeded) {
    545 		printf("entropy: double-seeded by bootloader\n");
    546 		seed->entropy = 0;
    547 	} else {
    548 		printf("entropy: entering seed from bootloader"
    549 		    " with %u bits of entropy\n", (unsigned)seed->entropy);
    550 	}
    551 
    552 	/* Enter it into the pool and promptly zero it.  */
    553 	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
    554 	    seed->entropy);
    555 	explicit_memset(seed, 0, sizeof(*seed));
    556 }
    557 
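/*
 * For reference, a sketch of how a boot program might compute the
 * checksum that entropy_seed verifies above.  The rndsave_t field
 * names follow their usage in this file; any boot-side buffer
 * handling is hypothetical:
 *
 *	SHA1_CTX ctx;
 *
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, (const void *)&seed->entropy,
 *	    sizeof(seed->entropy));
 *	SHA1Update(&ctx, seed->data, sizeof(seed->data));
 *	SHA1Final(seed->digest, &ctx);
 */
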
    558 /*
    559  * entropy_bootrequest()
    560  *
    561  *	Request entropy from all sources at boot, once config is
    562  *	complete and interrupts are running.
    563  */
    564 void
    565 entropy_bootrequest(void)
    566 {
    567 
    568 	KASSERT(E->stage >= ENTROPY_WARM);
    569 
    570 	/*
    571 	 * Request enough to satisfy the maximum entropy shortage.
    572 	 * This is harmless overkill if the bootloader provided a seed.
    573 	 */
    574 	mutex_enter(&E->lock);
    575 	entropy_request(ENTROPY_CAPACITY);
    576 	mutex_exit(&E->lock);
    577 }
    578 
    579 /*
    580  * entropy_epoch()
    581  *
    582  *	Returns the current entropy epoch.  If this changes, you should
     583  *	reseed.  A value of -1 means the system has not yet reached
     584  *	full entropy or been explicitly consolidated; the epoch never
     585  *	reverts to -1.  It is never zero, so you can always use zero
     586  *	as an uninitialized sentinel value meaning `reseed ASAP'.
    587  *
    588  *	Usage model:
    589  *
    590  *		struct foo {
    591  *			struct crypto_prng prng;
    592  *			unsigned epoch;
    593  *		} *foo;
    594  *
    595  *		unsigned epoch = entropy_epoch();
    596  *		if (__predict_false(epoch != foo->epoch)) {
    597  *			uint8_t seed[32];
    598  *			if (entropy_extract(seed, sizeof seed, 0) != 0)
    599  *				warn("no entropy");
    600  *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
    601  *			foo->epoch = epoch;
    602  *		}
    603  */
    604 unsigned
    605 entropy_epoch(void)
    606 {
    607 
    608 	/*
    609 	 * Unsigned int, so no need for seqlock for an atomic read, but
    610 	 * make sure we read it afresh each time.
    611 	 */
    612 	return atomic_load_relaxed(&E->epoch);
    613 }
    614 
    615 /*
    616  * entropy_ready()
    617  *
    618  *	True if the entropy pool has full entropy.
    619  */
    620 bool
    621 entropy_ready(void)
    622 {
    623 
    624 	return atomic_load_relaxed(&E->needed) == 0;
    625 }
    626 
    627 /*
    628  * entropy_account_cpu(ec)
    629  *
    630  *	Consider whether to consolidate entropy into the global pool
    631  *	after we just added some into the current CPU's pending pool.
    632  *
    633  *	- If this CPU can provide enough entropy now, do so.
    634  *
    635  *	- If this and whatever else is available on other CPUs can
    636  *	  provide enough entropy, kick the consolidation thread.
    637  *
    638  *	- Otherwise, do as little as possible, except maybe consolidate
    639  *	  entropy at most once a minute.
    640  *
    641  *	Caller must be bound to a CPU and therefore have exclusive
    642  *	access to ec.  Will acquire and release the global lock.
    643  */
    644 static void
    645 entropy_account_cpu(struct entropy_cpu *ec)
    646 {
    647 	unsigned diff;
    648 
    649 	KASSERT(E->stage == ENTROPY_HOT);
    650 
    651 	/*
    652 	 * If there's no entropy needed, and entropy has been
    653 	 * consolidated in the last minute, do nothing.
    654 	 */
    655 	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
    656 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
    657 	    __predict_true((time_uptime - E->timestamp) <= 60))
    658 		return;
    659 
    660 	/* If there's nothing pending, stop here.  */
    661 	if (ec->ec_pending == 0)
    662 		return;
    663 
    664 	/* Consider consolidation, under the lock.  */
    665 	mutex_enter(&E->lock);
    666 	if (E->needed != 0 && E->needed <= ec->ec_pending) {
    667 		/*
    668 		 * If we have not yet attained full entropy but we can
    669 		 * now, do so.  This way we disseminate entropy
    670 		 * promptly when it becomes available early at boot;
    671 		 * otherwise we leave it to the entropy consolidation
    672 		 * thread, which is rate-limited to mitigate side
    673 		 * channels and abuse.
    674 		 */
    675 		uint8_t buf[ENTPOOL_CAPACITY];
    676 
    677 		/* Transfer from the local pool to the global pool.  */
    678 		entpool_extract(ec->ec_pool, buf, sizeof buf);
    679 		entpool_enter(&E->pool, buf, sizeof buf);
    680 		atomic_store_relaxed(&ec->ec_pending, 0);
    681 		atomic_store_relaxed(&E->needed, 0);
    682 
    683 		/* Notify waiters that we now have full entropy.  */
    684 		entropy_notify();
    685 		entropy_immediate_evcnt.ev_count++;
    686 	} else {
    687 		/* Record how much we can add to the global pool.  */
    688 		diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending);
    689 		E->pending += diff;
    690 		atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff);
    691 
    692 		/*
    693 		 * This should have made a difference unless we were
    694 		 * already saturated.
    695 		 */
    696 		KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY);
    697 		KASSERT(E->pending);
    698 
    699 		if (E->needed <= E->pending) {
    700 			/*
    701 			 * Enough entropy between all the per-CPU
    702 			 * pools.  Wake up the housekeeping thread.
    703 			 *
    704 			 * If we don't need any entropy, this doesn't
    705 			 * mean much, but it is the only time we ever
    706 			 * gather additional entropy in case the
    707 			 * accounting has been overly optimistic.  This
    708 			 * happens at most once a minute, so there's
    709 			 * negligible performance cost.
    710 			 */
    711 			E->consolidate = true;
    712 			cv_broadcast(&E->cv);
    713 			if (E->needed == 0)
    714 				entropy_discretionary_evcnt.ev_count++;
    715 		} else {
    716 			/* Can't get full entropy.  Keep gathering.  */
    717 			entropy_partial_evcnt.ev_count++;
    718 		}
    719 	}
    720 	mutex_exit(&E->lock);
    721 }
    722 
    723 /*
    724  * entropy_enter_early(buf, len, nbits)
    725  *
    726  *	Do entropy bookkeeping globally, before we have established
    727  *	per-CPU pools.  Enter directly into the global pool in the hope
    728  *	that we enter enough before the first entropy_extract to thwart
    729  *	iterative-guessing attacks; entropy_extract will warn if not.
    730  */
    731 static void
    732 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
    733 {
    734 	bool notify = false;
    735 
    736 	if (E->stage >= ENTROPY_WARM)
    737 		mutex_enter(&E->lock);
    738 
    739 	/* Enter it into the pool.  */
    740 	entpool_enter(&E->pool, buf, len);
    741 
    742 	/*
    743 	 * Decide whether to notify reseed -- we will do so if either:
    744 	 * (a) we transition from partial entropy to full entropy, or
    745 	 * (b) we get a batch of full entropy all at once.
    746 	 */
    747 	notify |= (E->needed && E->needed <= nbits);
    748 	notify |= (nbits >= ENTROPY_CAPACITY*NBBY);
    749 
    750 	/* Subtract from the needed count and notify if appropriate.  */
    751 	E->needed -= MIN(E->needed, nbits);
    752 	if (notify) {
    753 		entropy_notify();
    754 		entropy_immediate_evcnt.ev_count++;
    755 	}
    756 
    757 	if (E->stage >= ENTROPY_WARM)
    758 		mutex_exit(&E->lock);
    759 }
    760 
    761 /*
    762  * entropy_enter(buf, len, nbits)
    763  *
    764  *	Enter len bytes of data from buf into the system's entropy
    765  *	pool, stirring as necessary when the internal buffer fills up.
    766  *	nbits is a lower bound on the number of bits of entropy in the
    767  *	process that led to this sample.
    768  */
    769 static void
    770 entropy_enter(const void *buf, size_t len, unsigned nbits)
    771 {
    772 	struct entropy_cpu *ec;
    773 	uint32_t pending;
    774 	int s;
    775 
    776 	KASSERTMSG(!cpu_intr_p(),
    777 	    "use entropy_enter_intr from interrupt context");
    778 	KASSERTMSG(howmany(nbits, NBBY) <= len,
    779 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
    780 
    781 	/* If it's too early after boot, just use entropy_enter_early.  */
    782 	if (__predict_false(E->stage < ENTROPY_HOT)) {
    783 		entropy_enter_early(buf, len, nbits);
    784 		return;
    785 	}
    786 
    787 	/*
    788 	 * Acquire the per-CPU state, blocking soft interrupts and
    789 	 * causing hard interrupts to drop samples on the floor.
    790 	 */
    791 	ec = percpu_getref(entropy_percpu);
    792 	s = splsoftserial();
    793 	KASSERT(!ec->ec_locked);
    794 	ec->ec_locked = true;
    795 	__insn_barrier();
    796 
    797 	/* Enter into the per-CPU pool.  */
    798 	entpool_enter(ec->ec_pool, buf, len);
    799 
    800 	/* Count up what we can add.  */
    801 	pending = ec->ec_pending;
    802 	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
    803 	atomic_store_relaxed(&ec->ec_pending, pending);
    804 
    805 	/* Consolidate globally if appropriate based on what we added.  */
    806 	entropy_account_cpu(ec);
    807 
    808 	/* Release the per-CPU state.  */
    809 	KASSERT(ec->ec_locked);
    810 	__insn_barrier();
    811 	ec->ec_locked = false;
    812 	splx(s);
    813 	percpu_putref(entropy_percpu);
    814 }
    815 
    816 /*
    817  * entropy_enter_intr(buf, len, nbits)
    818  *
    819  *	Enter up to len bytes of data from buf into the system's
    820  *	entropy pool without stirring.  nbits is a lower bound on the
    821  *	number of bits of entropy in the process that led to this
    822  *	sample.  If the sample could be entered completely, assume
    823  *	nbits of entropy pending; otherwise assume none, since we don't
    824  *	know whether some parts of the sample are constant, for
    825  *	instance.  Schedule a softint to stir the entropy pool if
    826  *	needed.  Return true if used fully, false if truncated at all.
    827  *
    828  *	Using this in thread context will work, but you might as well
    829  *	use entropy_enter in that case.
    830  */
    831 static bool
    832 entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
    833 {
    834 	struct entropy_cpu *ec;
    835 	bool fullyused = false;
    836 	uint32_t pending;
    837 
    838 	KASSERTMSG(howmany(nbits, NBBY) <= len,
    839 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
    840 
    841 	/* If it's too early after boot, just use entropy_enter_early.  */
    842 	if (__predict_false(E->stage < ENTROPY_HOT)) {
    843 		entropy_enter_early(buf, len, nbits);
    844 		return true;
    845 	}
    846 
    847 	/*
    848 	 * Acquire the per-CPU state.  If someone is in the middle of
    849 	 * using it, drop the sample.  Otherwise, take the lock so that
    850 	 * higher-priority interrupts will drop their samples.
    851 	 */
    852 	ec = percpu_getref(entropy_percpu);
    853 	if (ec->ec_locked)
    854 		goto out0;
    855 	ec->ec_locked = true;
    856 	__insn_barrier();
    857 
    858 	/*
    859 	 * Enter as much as we can into the per-CPU pool.  If it was
    860 	 * truncated, schedule a softint to stir the pool and stop.
    861 	 */
    862 	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
    863 		softint_schedule(entropy_sih);
    864 		goto out1;
    865 	}
    866 	fullyused = true;
    867 
    868 	/* Count up what we can contribute.  */
    869 	pending = ec->ec_pending;
    870 	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
    871 	atomic_store_relaxed(&ec->ec_pending, pending);
    872 
    873 	/* Schedule a softint if we added anything and it matters.  */
    874 	if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
    875 		atomic_load_relaxed(&entropy_depletion)) &&
    876 	    nbits != 0)
    877 		softint_schedule(entropy_sih);
    878 
    879 out1:	/* Release the per-CPU state.  */
    880 	KASSERT(ec->ec_locked);
    881 	__insn_barrier();
    882 	ec->ec_locked = false;
    883 out0:	percpu_putref(entropy_percpu);
    884 
    885 	return fullyused;
    886 }
    887 
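/*
 * Drivers normally arrive here via rnd_add_data(9) from a hard
 * interrupt handler rather than by calling entropy_enter_intr
 * directly.  A sketch, with a hypothetical softc and register-read
 * helper, crediting no entropy for a possibly attacker-visible
 * timestamp:
 *
 *	struct xyz_softc *sc = cookie;
 *	uint32_t sample;
 *
 *	sample = xyz_read_timer(sc);	(hypothetical helper)
 *	rnd_add_data(&sc->sc_rndsource, &sample, sizeof sample, 0);
 */
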
    888 /*
    889  * entropy_softintr(cookie)
    890  *
    891  *	Soft interrupt handler for entering entropy.  Takes care of
    892  *	stirring the local CPU's entropy pool if it filled up during
    893  *	hard interrupts, and promptly crediting entropy from the local
    894  *	CPU's entropy pool to the global entropy pool if needed.
    895  */
    896 static void
    897 entropy_softintr(void *cookie)
    898 {
    899 	struct entropy_cpu *ec;
    900 
    901 	/*
    902 	 * Acquire the per-CPU state.  Other users can lock this only
    903 	 * while soft interrupts are blocked.  Cause hard interrupts to
    904 	 * drop samples on the floor.
    905 	 */
    906 	ec = percpu_getref(entropy_percpu);
    907 	KASSERT(!ec->ec_locked);
    908 	ec->ec_locked = true;
    909 	__insn_barrier();
    910 
    911 	/* Count statistics.  */
    912 	ec->ec_softint_evcnt->ev_count++;
    913 
    914 	/* Stir the pool if necessary.  */
    915 	entpool_stir(ec->ec_pool);
    916 
    917 	/* Consolidate globally if appropriate based on what we added.  */
    918 	entropy_account_cpu(ec);
    919 
    920 	/* Release the per-CPU state.  */
    921 	KASSERT(ec->ec_locked);
    922 	__insn_barrier();
    923 	ec->ec_locked = false;
    924 	percpu_putref(entropy_percpu);
    925 }
    926 
    927 /*
    928  * entropy_thread(cookie)
    929  *
    930  *	Handle any asynchronous entropy housekeeping.
    931  */
    932 static void
    933 entropy_thread(void *cookie)
    934 {
    935 	bool consolidate;
    936 
    937 	for (;;) {
    938 		/*
    939 		 * Wait until there's full entropy somewhere among the
    940 		 * CPUs, as confirmed at most once per minute, or
    941 		 * someone wants to consolidate.
    942 		 */
    943 		if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
    944 			consolidate = true;
    945 		} else {
    946 			mutex_enter(&E->lock);
    947 			if (!E->consolidate)
    948 				cv_timedwait(&E->cv, &E->lock, 60*hz);
    949 			consolidate = E->consolidate;
    950 			E->consolidate = false;
    951 			mutex_exit(&E->lock);
    952 		}
    953 
    954 		if (consolidate) {
    955 			/* Do it.  */
    956 			entropy_do_consolidate();
    957 
    958 			/* Mitigate abuse.  */
    959 			kpause("entropy", false, hz, NULL);
    960 		}
    961 	}
    962 }
    963 
    964 /*
    965  * entropy_pending()
    966  *
    967  *	Count up the amount of entropy pending on other CPUs.
    968  */
    969 static uint32_t
    970 entropy_pending(void)
    971 {
    972 	uint32_t pending = 0;
    973 
    974 	percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
    975 	return pending;
    976 }
    977 
    978 static void
    979 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    980 {
    981 	struct entropy_cpu *ec = ptr;
    982 	uint32_t *pendingp = cookie;
    983 	uint32_t cpu_pending;
    984 
    985 	cpu_pending = atomic_load_relaxed(&ec->ec_pending);
    986 	*pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
    987 }
    988 
    989 /*
    990  * entropy_do_consolidate()
    991  *
    992  *	Issue a cross-call to gather entropy on all CPUs and advance
    993  *	the entropy epoch.
    994  */
    995 static void
    996 entropy_do_consolidate(void)
    997 {
    998 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
    999 	static struct timeval lasttime; /* serialized by E->lock */
   1000 	struct entpool pool;
   1001 	uint8_t buf[ENTPOOL_CAPACITY];
   1002 	unsigned diff;
   1003 	uint64_t ticket;
   1004 
   1005 	/* Gather entropy on all CPUs into a temporary pool.  */
   1006 	memset(&pool, 0, sizeof pool);
   1007 	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
   1008 	xc_wait(ticket);
   1009 
   1010 	/* Acquire the lock to notify waiters.  */
   1011 	mutex_enter(&E->lock);
   1012 
   1013 	/* Count another consolidation.  */
   1014 	entropy_consolidate_evcnt.ev_count++;
   1015 
   1016 	/* Note when we last consolidated, i.e. now.  */
   1017 	E->timestamp = time_uptime;
   1018 
   1019 	/* Mix what we gathered into the global pool.  */
   1020 	entpool_extract(&pool, buf, sizeof buf);
   1021 	entpool_enter(&E->pool, buf, sizeof buf);
   1022 	explicit_memset(&pool, 0, sizeof pool);
   1023 
   1024 	/* Count the entropy that was gathered.  */
   1025 	diff = MIN(E->needed, E->pending);
   1026 	atomic_store_relaxed(&E->needed, E->needed - diff);
   1027 	E->pending -= diff;
   1028 	if (__predict_false(E->needed > 0)) {
   1029 		if (ratecheck(&lasttime, &interval))
   1030 			log(LOG_DEBUG, "entropy: WARNING:"
   1031 			    " consolidating less than full entropy\n");
   1032 	}
   1033 
   1034 	/* Advance the epoch and notify waiters.  */
   1035 	entropy_notify();
   1036 
   1037 	/* Release the lock.  */
   1038 	mutex_exit(&E->lock);
   1039 }
   1040 
   1041 /*
   1042  * entropy_consolidate_xc(vpool, arg2)
   1043  *
   1044  *	Extract output from the local CPU's input pool and enter it
   1045  *	into a temporary pool passed as vpool.
   1046  */
   1047 static void
   1048 entropy_consolidate_xc(void *vpool, void *arg2 __unused)
   1049 {
   1050 	struct entpool *pool = vpool;
   1051 	struct entropy_cpu *ec;
   1052 	uint8_t buf[ENTPOOL_CAPACITY];
   1053 	uint32_t extra[7];
   1054 	unsigned i = 0;
   1055 	int s;
   1056 
   1057 	/* Grab CPU number and cycle counter to mix extra into the pool.  */
   1058 	extra[i++] = cpu_number();
   1059 	extra[i++] = entropy_timer();
   1060 
   1061 	/*
   1062 	 * Acquire the per-CPU state, blocking soft interrupts and
   1063 	 * discarding entropy in hard interrupts, so that we can
   1064 	 * extract from the per-CPU pool.
   1065 	 */
   1066 	ec = percpu_getref(entropy_percpu);
   1067 	s = splsoftserial();
   1068 	KASSERT(!ec->ec_locked);
   1069 	ec->ec_locked = true;
   1070 	__insn_barrier();
   1071 	extra[i++] = entropy_timer();
   1072 
   1073 	/* Extract the data and count it no longer pending.  */
   1074 	entpool_extract(ec->ec_pool, buf, sizeof buf);
   1075 	atomic_store_relaxed(&ec->ec_pending, 0);
   1076 	extra[i++] = entropy_timer();
   1077 
   1078 	/* Release the per-CPU state.  */
   1079 	KASSERT(ec->ec_locked);
   1080 	__insn_barrier();
   1081 	ec->ec_locked = false;
   1082 	splx(s);
   1083 	percpu_putref(entropy_percpu);
   1084 	extra[i++] = entropy_timer();
   1085 
   1086 	/*
   1087 	 * Copy over statistics, and enter the per-CPU extract and the
   1088 	 * extra timing into the temporary pool, under the global lock.
   1089 	 */
   1090 	mutex_enter(&E->lock);
   1091 	extra[i++] = entropy_timer();
   1092 	entpool_enter(pool, buf, sizeof buf);
   1093 	explicit_memset(buf, 0, sizeof buf);
   1094 	extra[i++] = entropy_timer();
   1095 	KASSERT(i == __arraycount(extra));
   1096 	entpool_enter(pool, extra, sizeof extra);
   1097 	explicit_memset(extra, 0, sizeof extra);
   1098 	mutex_exit(&E->lock);
   1099 }
   1100 
   1101 /*
   1102  * entropy_notify()
   1103  *
   1104  *	Caller just contributed entropy to the global pool.  Advance
   1105  *	the entropy epoch and notify waiters.
   1106  *
   1107  *	Caller must hold the global entropy lock.  Except for the
   1108  *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
    1109  *	have just transitioned from partial entropy to full
   1110  *	entropy -- E->needed should be zero now.
   1111  */
   1112 static void
   1113 entropy_notify(void)
   1114 {
   1115 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
   1116 	static struct timeval lasttime; /* serialized by E->lock */
   1117 	unsigned epoch;
   1118 
   1119 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1120 
   1121 	/*
   1122 	 * If this is the first time, print a message to the console
   1123 	 * that we're ready so operators can compare it to the timing
   1124 	 * of other events.
   1125 	 */
   1126 	if (__predict_false(!rnd_initial_entropy) && E->needed == 0) {
   1127 		printf("entropy: ready\n");
   1128 		rnd_initial_entropy = 1;
   1129 	}
   1130 
   1131 	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
   1132 	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
   1133 	    ratecheck(&lasttime, &interval)) {
   1134 		epoch = E->epoch + 1;
   1135 		if (epoch == 0 || epoch == (unsigned)-1)
   1136 			epoch = 1;
   1137 		atomic_store_relaxed(&E->epoch, epoch);
   1138 	}
   1139 
   1140 	/* Notify waiters.  */
   1141 	if (E->stage >= ENTROPY_WARM) {
   1142 		cv_broadcast(&E->cv);
   1143 		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
   1144 	}
   1145 
   1146 	/* Count another notification.  */
   1147 	entropy_notify_evcnt.ev_count++;
   1148 }
   1149 
   1150 /*
   1151  * entropy_consolidate()
   1152  *
   1153  *	Trigger entropy consolidation and wait for it to complete.
   1154  *
   1155  *	This should be used sparingly, not periodically -- requiring
   1156  *	conscious intervention by the operator or a clear policy
   1157  *	decision.  Otherwise, the kernel will automatically consolidate
   1158  *	when enough entropy has been gathered into per-CPU pools to
   1159  *	transition to full entropy.
   1160  */
   1161 void
   1162 entropy_consolidate(void)
   1163 {
   1164 	uint64_t ticket;
   1165 	int error;
   1166 
   1167 	KASSERT(E->stage == ENTROPY_HOT);
   1168 
   1169 	mutex_enter(&E->lock);
   1170 	ticket = entropy_consolidate_evcnt.ev_count;
   1171 	E->consolidate = true;
   1172 	cv_broadcast(&E->cv);
   1173 	while (ticket == entropy_consolidate_evcnt.ev_count) {
   1174 		error = cv_wait_sig(&E->cv, &E->lock);
   1175 		if (error)
   1176 			break;
   1177 	}
   1178 	mutex_exit(&E->lock);
   1179 }
   1180 
   1181 /*
   1182  * sysctl -w kern.entropy.consolidate=1
   1183  *
   1184  *	Trigger entropy consolidation and wait for it to complete.
   1185  *	Writable only by superuser.  This, writing to /dev/random, and
   1186  *	ioctl(RNDADDDATA) are the only ways for the system to
   1187  *	consolidate entropy if the operator knows something the kernel
   1188  *	doesn't about how unpredictable the pending entropy pools are.
   1189  */
   1190 static int
   1191 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
   1192 {
   1193 	struct sysctlnode node = *rnode;
   1194 	int arg;
   1195 	int error;
   1196 
   1197 	KASSERT(E->stage == ENTROPY_HOT);
   1198 
   1199 	node.sysctl_data = &arg;
   1200 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   1201 	if (error || newp == NULL)
   1202 		return error;
   1203 	if (arg)
   1204 		entropy_consolidate();
   1205 
   1206 	return error;
   1207 }
   1208 
   1209 /*
   1210  * sysctl -w kern.entropy.gather=1
   1211  *
   1212  *	Trigger gathering entropy from all on-demand sources, and wait
   1213  *	for synchronous sources (but not asynchronous sources) to
   1214  *	complete.  Writable only by superuser.
   1215  */
   1216 static int
   1217 sysctl_entropy_gather(SYSCTLFN_ARGS)
   1218 {
   1219 	struct sysctlnode node = *rnode;
   1220 	int arg;
   1221 	int error;
   1222 
   1223 	KASSERT(E->stage == ENTROPY_HOT);
   1224 
   1225 	node.sysctl_data = &arg;
   1226 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   1227 	if (error || newp == NULL)
   1228 		return error;
   1229 	if (arg) {
   1230 		mutex_enter(&E->lock);
   1231 		entropy_request(ENTROPY_CAPACITY);
   1232 		mutex_exit(&E->lock);
   1233 	}
   1234 
   1235 	return 0;
   1236 }
   1237 
   1238 /*
   1239  * entropy_extract(buf, len, flags)
   1240  *
   1241  *	Extract len bytes from the global entropy pool into buf.
   1242  *
   1243  *	Flags may have:
   1244  *
   1245  *		ENTROPY_WAIT	Wait for entropy if not available yet.
   1246  *		ENTROPY_SIG	Allow interruption by a signal during wait.
   1247  *		ENTROPY_HARDFAIL Either fill the buffer with full entropy,
   1248  *				or fail without filling it at all.
   1249  *
   1250  *	Return zero on success, or error on failure:
   1251  *
   1252  *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
   1253  *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
   1254  *
   1255  *	If ENTROPY_WAIT is set, allowed only in thread context.  If
   1256  *	ENTROPY_WAIT is not set, allowed up to IPL_VM.  (XXX That's
   1257  *	awfully high...  Do we really need it in hard interrupts?  This
   1258  *	arises from use of cprng_strong(9).)
   1259  */
   1260 int
   1261 entropy_extract(void *buf, size_t len, int flags)
   1262 {
   1263 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
   1264 	static struct timeval lasttime; /* serialized by E->lock */
   1265 	int error;
   1266 
   1267 	if (ISSET(flags, ENTROPY_WAIT)) {
   1268 		ASSERT_SLEEPABLE();
   1269 		KASSERTMSG(E->stage >= ENTROPY_WARM,
   1270 		    "can't wait for entropy until warm");
   1271 	}
   1272 
   1273 	/* Acquire the global lock to get at the global pool.  */
   1274 	if (E->stage >= ENTROPY_WARM)
   1275 		mutex_enter(&E->lock);
   1276 
   1277 	/* Count up request for entropy in interrupt context.  */
   1278 	if (cpu_intr_p())
   1279 		entropy_extract_intr_evcnt.ev_count++;
   1280 
   1281 	/* Wait until there is enough entropy in the system.  */
   1282 	error = 0;
   1283 	while (E->needed) {
   1284 		/* Ask for more, synchronously if possible.  */
   1285 		entropy_request(len);
   1286 
   1287 		/* If we got enough, we're done.  */
   1288 		if (E->needed == 0) {
   1289 			KASSERT(error == 0);
   1290 			break;
   1291 		}
   1292 
   1293 		/* If not waiting, stop here.  */
   1294 		if (!ISSET(flags, ENTROPY_WAIT)) {
   1295 			error = EWOULDBLOCK;
   1296 			break;
   1297 		}
   1298 
   1299 		/* Wait for some entropy to come in and try again.  */
   1300 		KASSERT(E->stage >= ENTROPY_WARM);
   1301 		printf("entropy: pid %d (%s) blocking due to lack of entropy\n",
   1302 		       curproc->p_pid, curproc->p_comm);
   1303 
   1304 		if (ISSET(flags, ENTROPY_SIG)) {
   1305 			error = cv_wait_sig(&E->cv, &E->lock);
   1306 			if (error)
   1307 				break;
   1308 		} else {
   1309 			cv_wait(&E->cv, &E->lock);
   1310 		}
   1311 	}
   1312 
   1313 	/*
   1314 	 * Count failure -- but fill the buffer nevertheless, unless
   1315 	 * the caller specified ENTROPY_HARDFAIL.
   1316 	 */
   1317 	if (error) {
   1318 		if (ISSET(flags, ENTROPY_HARDFAIL))
   1319 			goto out;
   1320 		entropy_extract_fail_evcnt.ev_count++;
   1321 	}
   1322 
   1323 	/*
   1324 	 * Report a warning if we have never yet reached full entropy.
   1325 	 * This is the only case where we consider entropy to be
   1326 	 * `depleted' without kern.entropy.depletion enabled -- when we
   1327 	 * only have partial entropy, an adversary may be able to
   1328 	 * narrow the state of the pool down to a small number of
   1329 	 * possibilities; the output then enables them to confirm a
   1330 	 * guess, reducing its entropy from the adversary's perspective
   1331 	 * to zero.
   1332 	 */
   1333 	if (__predict_false(E->epoch == (unsigned)-1)) {
   1334 		if (ratecheck(&lasttime, &interval))
   1335 			printf("entropy: WARNING:"
   1336 			    " extracting entropy too early\n");
   1337 		atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY);
   1338 	}
   1339 
   1340 	/* Extract data from the pool, and `deplete' if we're doing that.  */
   1341 	entpool_extract(&E->pool, buf, len);
   1342 	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
   1343 	    error == 0) {
   1344 		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
   1345 
   1346 		atomic_store_relaxed(&E->needed,
   1347 		    E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost));
   1348 		entropy_deplete_evcnt.ev_count++;
   1349 	}
   1350 
   1351 out:	/* Release the global lock and return the error.  */
   1352 	if (E->stage >= ENTROPY_WARM)
   1353 		mutex_exit(&E->lock);
   1354 	return error;
   1355 }
   1356 
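/*
 * Example usage (a sketch): draw a seed for a cryptographic PRNG,
 * waiting for full entropy, allowing interruption by a signal, and
 * insisting on full entropy or nothing.  Without ENTROPY_HARDFAIL,
 * the buffer would still be filled with best-effort output on
 * failure.  crypto_prng_reseed is hypothetical, as in the
 * entropy_epoch usage model above:
 *
 *	uint8_t seed[32];
 *	int error;
 *
 *	error = entropy_extract(seed, sizeof seed,
 *	    ENTROPY_WAIT|ENTROPY_SIG|ENTROPY_HARDFAIL);
 *	if (error)
 *		return error;
 *	crypto_prng_reseed(&prng, seed, sizeof seed);
 *	explicit_memset(seed, 0, sizeof seed);
 */
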
   1357 /*
   1358  * entropy_poll(events)
   1359  *
   1360  *	Return the subset of events ready, and if it is not all of
   1361  *	events, record curlwp as waiting for entropy.
   1362  */
   1363 int
   1364 entropy_poll(int events)
   1365 {
   1366 	int revents = 0;
   1367 
   1368 	KASSERT(E->stage >= ENTROPY_WARM);
   1369 
   1370 	/* Always ready for writing.  */
   1371 	revents |= events & (POLLOUT|POLLWRNORM);
   1372 
   1373 	/* Narrow it down to reads.  */
   1374 	events &= POLLIN|POLLRDNORM;
   1375 	if (events == 0)
   1376 		return revents;
   1377 
   1378 	/*
   1379 	 * If we have reached full entropy and we're not depleting
   1380 	 * entropy, we are forever ready.
   1381 	 */
   1382 	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
   1383 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
   1384 		return revents | events;
   1385 
   1386 	/*
   1387 	 * Otherwise, check whether we need entropy under the lock.  If
   1388 	 * we don't, we're ready; if we do, add ourselves to the queue.
   1389 	 */
   1390 	mutex_enter(&E->lock);
   1391 	if (E->needed == 0)
   1392 		revents |= events;
   1393 	else
   1394 		selrecord(curlwp, &E->selq);
   1395 	mutex_exit(&E->lock);
   1396 
   1397 	return revents;
   1398 }
   1399 
   1400 /*
   1401  * filt_entropy_read_detach(kn)
   1402  *
   1403  *	struct filterops::f_detach callback for entropy read events:
   1404  *	remove kn from the list of waiters.
   1405  */
   1406 static void
   1407 filt_entropy_read_detach(struct knote *kn)
   1408 {
   1409 
   1410 	KASSERT(E->stage >= ENTROPY_WARM);
   1411 
   1412 	mutex_enter(&E->lock);
   1413 	selremove_knote(&E->selq, kn);
   1414 	mutex_exit(&E->lock);
   1415 }
   1416 
   1417 /*
   1418  * filt_entropy_read_event(kn, hint)
   1419  *
   1420  *	struct filterops::f_event callback for entropy read events:
   1421  *	poll for entropy.  Caller must hold the global entropy lock if
   1422  *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
   1423  */
   1424 static int
   1425 filt_entropy_read_event(struct knote *kn, long hint)
   1426 {
   1427 	int ret;
   1428 
   1429 	KASSERT(E->stage >= ENTROPY_WARM);
   1430 
   1431 	/* Acquire the lock, if caller is outside entropy subsystem.  */
   1432 	if (hint == NOTE_SUBMIT)
   1433 		KASSERT(mutex_owned(&E->lock));
   1434 	else
   1435 		mutex_enter(&E->lock);
   1436 
   1437 	/*
   1438 	 * If we still need entropy, can't read anything; if not, can
   1439 	 * read arbitrarily much.
   1440 	 */
   1441 	if (E->needed != 0) {
   1442 		ret = 0;
   1443 	} else {
   1444 		if (atomic_load_relaxed(&entropy_depletion))
   1445 			kn->kn_data = ENTROPY_CAPACITY*NBBY;
   1446 		else
   1447 			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
   1448 		ret = 1;
   1449 	}
   1450 
   1451 	/* Release the lock, if caller is outside entropy subsystem.  */
   1452 	if (hint == NOTE_SUBMIT)
   1453 		KASSERT(mutex_owned(&E->lock));
   1454 	else
   1455 		mutex_exit(&E->lock);
   1456 
   1457 	return ret;
   1458 }
   1459 
   1460 static const struct filterops entropy_read_filtops = {
   1461 	.f_isfd = 1,		/* XXX Makes sense only for /dev/u?random.  */
   1462 	.f_attach = NULL,
   1463 	.f_detach = filt_entropy_read_detach,
   1464 	.f_event = filt_entropy_read_event,
   1465 };
   1466 
   1467 /*
   1468  * entropy_kqfilter(kn)
   1469  *
   1470  *	Register kn to receive entropy event notifications.  May be
   1471  *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
   1472  */
   1473 int
   1474 entropy_kqfilter(struct knote *kn)
   1475 {
   1476 
   1477 	KASSERT(E->stage >= ENTROPY_WARM);
   1478 
   1479 	switch (kn->kn_filter) {
   1480 	case EVFILT_READ:
   1481 		/* Enter into the global select queue.  */
   1482 		mutex_enter(&E->lock);
   1483 		kn->kn_fop = &entropy_read_filtops;
   1484 		selrecord_knote(&E->selq, kn);
   1485 		mutex_exit(&E->lock);
   1486 		return 0;
   1487 	case EVFILT_WRITE:
   1488 		/* Can always dump entropy into the system.  */
   1489 		kn->kn_fop = &seltrue_filtops;
   1490 		return 0;
   1491 	default:
   1492 		return EINVAL;
   1493 	}
   1494 }
   1495 
   1496 /*
   1497  * rndsource_setcb(rs, get, getarg)
   1498  *
   1499  *	Set the request callback for the entropy source rs, if it can
   1500  *	provide entropy on demand.  Must precede rnd_attach_source.
   1501  */
   1502 void
   1503 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
   1504     void *getarg)
   1505 {
   1506 
   1507 	rs->get = get;
   1508 	rs->getarg = getarg;
   1509 }
   1510 
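/*
 * A sketch of the expected wiring for an on-demand source; the xyz
 * names are hypothetical.  rndsource_setcb must precede
 * rnd_attach_source, and RND_FLAG_HASCB tells the subsystem that the
 * callback is present:
 *
 *	static void
 *	xyz_get(size_t nbytes, void *cookie)
 *	{
 *		struct xyz_softc *sc = cookie;
 *
 *		(gather about nbytes bytes and feed them to
 *		rnd_add_data with a justified entropy estimate)
 *	}
 *
 *	rndsource_setcb(&sc->sc_rndsource, xyz_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, "xyz", RND_TYPE_RNG,
 *	    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
 */
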
   1511 /*
   1512  * rnd_attach_source(rs, name, type, flags)
   1513  *
   1514  *	Attach the entropy source rs.  Must be done after
   1515  *	rndsource_setcb, if any, and before any calls to rnd_add_data.
   1516  */
   1517 void
   1518 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
   1519     uint32_t flags)
   1520 {
   1521 	uint32_t extra[4];
   1522 	unsigned i = 0;
   1523 
   1524 	/* Grab cycle counter to mix extra into the pool.  */
   1525 	extra[i++] = entropy_timer();
   1526 
   1527 	/*
   1528 	 * Apply some standard flags:
   1529 	 *
   1530 	 * - We do not bother with network devices by default, for
   1531 	 *   hysterical raisins (perhaps: because it is often the case
   1532 	 *   that an adversary can influence network packet timings).
   1533 	 */
   1534 	switch (type) {
   1535 	case RND_TYPE_NET:
   1536 		flags |= RND_FLAG_NO_COLLECT;
   1537 		break;
   1538 	}
   1539 
   1540 	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
   1541 	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
   1542 
   1543 	/* Initialize the random source.  */
   1544 	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
   1545 	strlcpy(rs->name, name, sizeof(rs->name));
   1546 	rs->total = 0;
   1547 	rs->type = type;
   1548 	rs->flags = flags;
   1549 	if (E->stage >= ENTROPY_WARM)
   1550 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
   1551 	extra[i++] = entropy_timer();
   1552 
   1553 	/* Wire it into the global list of random sources.  */
   1554 	if (E->stage >= ENTROPY_WARM)
   1555 		mutex_enter(&E->lock);
   1556 	LIST_INSERT_HEAD(&E->sources, rs, list);
   1557 	if (E->stage >= ENTROPY_WARM)
   1558 		mutex_exit(&E->lock);
   1559 	extra[i++] = entropy_timer();
   1560 
   1561 	/* Request that it provide entropy ASAP, if we can.  */
   1562 	if (ISSET(flags, RND_FLAG_HASCB))
   1563 		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
   1564 	extra[i++] = entropy_timer();
   1565 
   1566 	/* Mix the extra into the pool.  */
   1567 	KASSERT(i == __arraycount(extra));
   1568 	entropy_enter(extra, sizeof extra, 0);
   1569 	explicit_memset(extra, 0, sizeof extra);
   1570 }
   1571 
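         /*
          * Illustrative driver-side sketch (a hypothetical "mydev" driver,
          * not part of this file): register an on-demand hardware RNG.
          * Per the comments above, the callback must be set before the
          * source is attached.
          *
          *	static struct krndsource mydev_rndsource;
          *
          *	static void
          *	mydev_get(size_t nbytes, void *arg)
          *	{
          *		struct mydev_softc *sc = arg;
          *
          *		...gather about nbytes bytes from the hardware and
          *		feed them back via rnd_add_data...
          *	}
          *
          *	rndsource_setcb(&mydev_rndsource, mydev_get, sc);
          *	rnd_attach_source(&mydev_rndsource, "mydev", RND_TYPE_RNG,
          *	    RND_FLAG_DEFAULT|RND_FLAG_HASCB);
          */
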
   1572 /*
   1573  * rnd_detach_source(rs)
   1574  *
   1575  *	Detach the entropy source rs.  May sleep waiting for users to
   1576  *	drain.  Further use is not allowed.
   1577  */
   1578 void
   1579 rnd_detach_source(struct krndsource *rs)
   1580 {
   1581 
   1582 	/*
   1583 	 * If we're cold (shouldn't happen, but hey), just remove it
   1584 	 * from the list -- there's nothing allocated.
   1585 	 */
   1586 	if (E->stage == ENTROPY_COLD) {
   1587 		LIST_REMOVE(rs, list);
   1588 		return;
   1589 	}
   1590 
   1591 	/* We may have to wait for entropy_request.  */
   1592 	ASSERT_SLEEPABLE();
   1593 
   1594 	/* Wait until the source list is not in use, and remove it.  */
   1595 	mutex_enter(&E->lock);
   1596 	while (E->sourcelock)
   1597 		cv_wait(&E->cv, &E->lock);
   1598 	LIST_REMOVE(rs, list);
   1599 	mutex_exit(&E->lock);
   1600 
   1601 	/* Free the per-CPU data.  */
   1602 	percpu_free(rs->state, sizeof(struct rndsource_cpu));
   1603 }
   1604 
   1605 /*
   1606  * rnd_lock_sources()
   1607  *
   1608  *	Prevent changes to the list of rndsources while we iterate it.
   1609  *	Interruptible.  Caller must hold the global entropy lock.  If
   1610  *	successful, no rndsource will go away until rnd_unlock_sources
   1611  *	even while the caller releases the global entropy lock.
   1612  */
   1613 static int
   1614 rnd_lock_sources(void)
   1615 {
   1616 	int error;
   1617 
   1618 	KASSERT(mutex_owned(&E->lock));
   1619 
   1620 	while (E->sourcelock) {
   1621 		error = cv_wait_sig(&E->cv, &E->lock);
   1622 		if (error)
   1623 			return error;
   1624 	}
   1625 
   1626 	E->sourcelock = curlwp;
   1627 	return 0;
   1628 }
   1629 
   1630 /*
   1631  * rnd_trylock_sources()
   1632  *
   1633  *	Try to lock the list of sources, but if it's already locked,
   1634  *	fail.  Caller must hold the global entropy lock.  If
   1635  *	successful, no rndsource will go away until rnd_unlock_sources
   1636  *	even while the caller releases the global entropy lock.
   1637  */
   1638 static bool
   1639 rnd_trylock_sources(void)
   1640 {
   1641 
   1642 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1643 
   1644 	if (E->sourcelock)
   1645 		return false;
   1646 	E->sourcelock = curlwp;
   1647 	return true;
   1648 }
   1649 
   1650 /*
   1651  * rnd_unlock_sources()
   1652  *
   1653  *	Unlock the list of sources after rnd_lock_sources or
   1654  *	rnd_trylock_sources.  Caller must hold the global entropy lock.
   1655  */
   1656 static void
   1657 rnd_unlock_sources(void)
   1658 {
   1659 
   1660 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1661 
   1662 	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
   1663 	    curlwp, E->sourcelock);
   1664 	E->sourcelock = NULL;
   1665 	if (E->stage >= ENTROPY_WARM)
   1666 		cv_broadcast(&E->cv);
   1667 }
   1668 
   1669 /*
   1670  * rnd_sources_locked()
   1671  *
   1672  *	True if we hold the list of rndsources locked, for diagnostic
   1673  *	assertions.
   1674  */
   1675 static bool __diagused
   1676 rnd_sources_locked(void)
   1677 {
   1678 
   1679 	return E->sourcelock == curlwp;
   1680 }
   1681 
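         /*
          * Canonical usage sketch for the source-list lock -- this is the
          * pattern the ioctl paths below follow when they iterate over
          * E->sources and may sleep per source:
          *
          *	mutex_enter(&E->lock);
          *	error = rnd_lock_sources();
          *	if (error) {
          *		mutex_exit(&E->lock);
          *		return error;
          *	}
          *	LIST_FOREACH(rs, &E->sources, list) {
          *		mutex_exit(&E->lock);
          *		...slow per-source work; rs cannot go away...
          *		mutex_enter(&E->lock);
          *	}
          *	rnd_unlock_sources();
          *	mutex_exit(&E->lock);
          */
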
   1682 /*
   1683  * entropy_request(nbytes)
   1684  *
   1685  *	Request nbytes bytes of entropy from all sources in the system.
   1686  *	OK if we overdo it.  Caller must hold the global entropy lock;
   1687  *	will release and re-acquire it.
   1688  */
   1689 static void
   1690 entropy_request(size_t nbytes)
   1691 {
   1692 	struct krndsource *rs;
   1693 
   1694 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1695 
   1696 	/*
   1697 	 * If there is a request in progress, let it proceed.
   1698 	 * Otherwise, note that a request is in progress to avoid
   1699 	 * reentry and to block rnd_detach_source until we're done.
   1700 	 */
   1701 	if (!rnd_trylock_sources())
   1702 		return;
   1703 	entropy_request_evcnt.ev_count++;
   1704 
   1705 	/* Clamp to the maximum reasonable request.  */
   1706 	nbytes = MIN(nbytes, ENTROPY_CAPACITY);
   1707 
   1708 	/* Walk the list of sources.  */
   1709 	LIST_FOREACH(rs, &E->sources, list) {
   1710 		/* Skip sources without callbacks.  */
   1711 		if (!ISSET(rs->flags, RND_FLAG_HASCB))
   1712 			continue;
   1713 
   1714 		/*
   1715 		 * Skip sources that are disabled altogether -- we
   1716 		 * would just ignore their samples anyway.
   1717 		 */
   1718 		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
   1719 			continue;
   1720 
   1721 		/* Drop the lock while we call the callback.  */
   1722 		if (E->stage >= ENTROPY_WARM)
   1723 			mutex_exit(&E->lock);
   1724 		(*rs->get)(nbytes, rs->getarg);
   1725 		if (E->stage >= ENTROPY_WARM)
   1726 			mutex_enter(&E->lock);
   1727 	}
   1728 
   1729 	/* Notify rnd_detach_source that the request is done.  */
   1730 	rnd_unlock_sources();
   1731 }
   1732 
   1733 /*
   1734  * rnd_add_uint32(rs, value)
   1735  *
   1736  *	Enter 32 bits of data from an entropy source into the pool.
   1737  *
   1738  *	If rs is NULL, may not be called from interrupt context.
   1739  *
   1740  *	If rs is non-NULL, may be called from any context.  May drop
   1741  *	data if called from interrupt context.
   1742  */
   1743 void
   1744 rnd_add_uint32(struct krndsource *rs, uint32_t value)
   1745 {
   1746 
   1747 	rnd_add_data(rs, &value, sizeof value, 0);
   1748 }
   1749 
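         /*
          * _rnd_add_uint32(rs, value), _rnd_add_uint64(rs, value)
          *
          *	Same as rnd_add_uint32, at the respective widths; all of
          *	these simply defer to rnd_add_data.
          */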
   1750 void
   1751 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
   1752 {
   1753 
   1754 	rnd_add_data(rs, &value, sizeof value, 0);
   1755 }
   1756 
   1757 void
   1758 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
   1759 {
   1760 
   1761 	rnd_add_data(rs, &value, sizeof value, 0);
   1762 }
   1763 
   1764 /*
   1765  * rnd_add_data(rs, buf, len, entropybits)
   1766  *
   1767  *	Enter data from an entropy source into the pool, with a
   1768  *	driver's estimate of how much entropy the physical source of
   1769  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
   1770  *	estimate and treat it as zero.
   1771  *
   1772  *	If rs is NULL, may not be called from interrupt context.
   1773  *
   1774  *	If rs is non-NULL, may be called from any context.  May drop
   1775  *	data if called from interrupt context.
   1776  */
   1777 void
   1778 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
   1779     uint32_t entropybits)
   1780 {
   1781 	uint32_t extra;
   1782 	uint32_t flags;
   1783 
   1784 	KASSERTMSG(howmany(entropybits, NBBY) <= len,
   1785 	    "%s: impossible entropy rate:"
   1786 	    " %"PRIu32" bits in %"PRIu32"-byte string",
   1787 	    rs ? rs->name : "(anonymous)", entropybits, len);
   1788 
   1789 	/* If there's no rndsource, just enter the data and time now.  */
   1790 	if (rs == NULL) {
   1791 		entropy_enter(buf, len, entropybits);
   1792 		extra = entropy_timer();
   1793 		entropy_enter(&extra, sizeof extra, 0);
   1794 		explicit_memset(&extra, 0, sizeof extra);
   1795 		return;
   1796 	}
   1797 
   1798 	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
   1799 	flags = atomic_load_relaxed(&rs->flags);
   1800 
   1801 	/*
   1802 	 * Skip if:
   1803 	 * - we're not collecting entropy, or
   1804 	 * - the operator doesn't want to collect entropy from this, or
   1805 	 * - neither data nor timings are being collected from this.
   1806 	 */
   1807 	if (!atomic_load_relaxed(&entropy_collection) ||
   1808 	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
   1809 	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
   1810 		return;
   1811 
   1812 	/* If asked, ignore the estimate.  */
   1813 	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
   1814 		entropybits = 0;
   1815 
   1816 	/* If we are collecting data, enter them.  */
   1817 	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
   1818 		rnd_add_data_1(rs, buf, len, entropybits);
   1819 
   1820 	/* If we are collecting timings, enter one.  */
   1821 	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
   1822 		extra = entropy_timer();
   1823 		rnd_add_data_1(rs, &extra, sizeof extra, 0);
   1824 	}
   1825 }
   1826 
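         /*
          * Illustrative sketch (continuing the hypothetical "mydev" driver
          * above, not part of this file): enter 32 bytes of samples with a
          * deliberately conservative claim of one bit of entropy per byte.
          *
          *	uint8_t buf[32];
          *
          *	...fill buf from the hardware...
          *	rnd_add_data(&mydev_rndsource, buf, sizeof buf, sizeof buf);
          *	explicit_memset(buf, 0, sizeof buf);
          */
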
   1827 /*
   1828  * rnd_add_data_1(rs, buf, len, entropybits)
   1829  *
   1830  *	Internal subroutine to call either entropy_enter_intr, if we're
   1831  *	in interrupt context, or entropy_enter if not, and to count the
   1832  *	entropy in an rndsource.
   1833  */
   1834 static void
   1835 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
   1836     uint32_t entropybits)
   1837 {
   1838 	bool fullyused;
   1839 
   1840 	/*
   1841 	 * If we're in interrupt context, use entropy_enter_intr and
   1842 	 * take note of whether it consumed the full sample; if not,
   1843 	 * use entropy_enter, which always consumes the full sample.
   1844 	 */
   1845 	if (curlwp && cpu_intr_p()) {
   1846 		fullyused = entropy_enter_intr(buf, len, entropybits);
   1847 	} else {
   1848 		entropy_enter(buf, len, entropybits);
   1849 		fullyused = true;
   1850 	}
   1851 
   1852 	/*
   1853 	 * If we used the full sample, note how many bits were
   1854 	 * contributed from this source.
   1855 	 */
   1856 	if (fullyused) {
   1857 		if (E->stage < ENTROPY_HOT) {
   1858 			if (E->stage >= ENTROPY_WARM)
   1859 				mutex_enter(&E->lock);
   1860 			rs->total += MIN(UINT_MAX - rs->total, entropybits);
   1861 			if (E->stage >= ENTROPY_WARM)
   1862 				mutex_exit(&E->lock);
   1863 		} else {
   1864 			struct rndsource_cpu *rc = percpu_getref(rs->state);
   1865 			unsigned nbits = rc->rc_nbits;
   1866 
   1867 			nbits += MIN(UINT_MAX - nbits, entropybits);
   1868 			atomic_store_relaxed(&rc->rc_nbits, nbits);
   1869 			percpu_putref(rs->state);
   1870 		}
   1871 	}
   1872 }
   1873 
   1874 /*
   1875  * rnd_add_data_sync(rs, buf, len, entropybits)
   1876  *
   1877  *	Same as rnd_add_data.  Originally used in rndsource callbacks,
   1878  *	to break an unnecessary cycle; no longer really needed.
   1879  */
   1880 void
   1881 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
   1882     uint32_t entropybits)
   1883 {
   1884 
   1885 	rnd_add_data(rs, buf, len, entropybits);
   1886 }
   1887 
   1888 /*
   1889  * rndsource_entropybits(rs)
   1890  *
   1891  *	Return approximately the number of bits of entropy that have
   1892  *	been contributed via rs so far.  Approximate if other CPUs may
   1893  *	be calling rnd_add_data concurrently.
   1894  */
   1895 static unsigned
   1896 rndsource_entropybits(struct krndsource *rs)
   1897 {
   1898 	unsigned nbits = rs->total;
   1899 
   1900 	KASSERT(E->stage >= ENTROPY_WARM);
   1901 	KASSERT(rnd_sources_locked());
   1902 	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
   1903 	return nbits;
   1904 }
   1905 
   1906 static void
   1907 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
   1908 {
   1909 	struct rndsource_cpu *rc = ptr;
   1910 	unsigned *nbitsp = cookie;
   1911 	unsigned cpu_nbits;
   1912 
   1913 	cpu_nbits = atomic_load_relaxed(&rc->rc_nbits);
   1914 	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
   1915 }
   1916 
   1917 /*
   1918  * rndsource_to_user(rs, urs)
   1919  *
   1920  *	Copy a description of rs out to urs for userland.
   1921  */
   1922 static void
   1923 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
   1924 {
   1925 
   1926 	KASSERT(E->stage >= ENTROPY_WARM);
   1927 	KASSERT(rnd_sources_locked());
   1928 
   1929 	/* Avoid kernel memory disclosure.  */
   1930 	memset(urs, 0, sizeof(*urs));
   1931 
   1932 	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
   1933 	strlcpy(urs->name, rs->name, sizeof(urs->name));
   1934 	urs->total = rndsource_entropybits(rs);
   1935 	urs->type = rs->type;
   1936 	urs->flags = atomic_load_relaxed(&rs->flags);
   1937 }
   1938 
   1939 /*
   1940  * rndsource_to_user_est(rs, urse)
   1941  *
   1942  *	Copy a description of rs and estimation statistics out to urse
   1943  *	for userland.
   1944  */
   1945 static void
   1946 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
   1947 {
   1948 
   1949 	KASSERT(E->stage >= ENTROPY_WARM);
   1950 	KASSERT(rnd_sources_locked());
   1951 
   1952 	/* Avoid kernel memory disclosure.  */
   1953 	memset(urse, 0, sizeof(*urse));
   1954 
   1955 	/* Copy out the rndsource description.  */
   1956 	rndsource_to_user(rs, &urse->rt);
   1957 
   1958 	/* Zero out the statistics because we don't do estimation.  */
   1959 	urse->dt_samples = 0;
   1960 	urse->dt_total = 0;
   1961 	urse->dv_samples = 0;
   1962 	urse->dv_total = 0;
   1963 }
   1964 
   1965 /*
   1966  * entropy_reset_xc(arg1, arg2)
   1967  *
   1968  *	Reset the current CPU's pending entropy to zero.
   1969  */
   1970 static void
   1971 entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
   1972 {
   1973 	uint32_t extra = entropy_timer();
   1974 	struct entropy_cpu *ec;
   1975 	int s;
   1976 
   1977 	/*
   1978 	 * Acquire the per-CPU state, blocking soft interrupts and
   1979 	 * causing hard interrupts to drop samples on the floor.
   1980 	 */
   1981 	ec = percpu_getref(entropy_percpu);
   1982 	s = splsoftserial();
   1983 	KASSERT(!ec->ec_locked);
   1984 	ec->ec_locked = true;
   1985 	__insn_barrier();
   1986 
   1987 	/* Zero the pending count and enter a cycle count for fun.  */
   1988 	ec->ec_pending = 0;
   1989 	entpool_enter(ec->ec_pool, &extra, sizeof extra);
   1990 
   1991 	/* Release the per-CPU state.  */
   1992 	KASSERT(ec->ec_locked);
   1993 	__insn_barrier();
   1994 	ec->ec_locked = false;
   1995 	splx(s);
   1996 	percpu_putref(entropy_percpu);
   1997 }
   1998 
   1999 /*
   2000  * entropy_ioctl(cmd, data)
   2001  *
   2002  *	Handle various /dev/random ioctl queries.
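          *
          *	Illustrative userland sketch (hypothetical, not part of this
          *	file):
          *
          *		uint32_t bits;
          *		int fd = open("/dev/urandom", O_RDONLY);
          *
          *		if (fd == -1 || ioctl(fd, RNDGETENTCNT, &bits) == -1)
          *			err(1, "RNDGETENTCNT");
          *		printf("%u bits of entropy in the pool\n", bits);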
   2003  */
   2004 int
   2005 entropy_ioctl(unsigned long cmd, void *data)
   2006 {
   2007 	struct krndsource *rs;
    2008 	bool privileged = false;	/* set and used only for RNDADDDATA */
   2009 	int error;
   2010 
   2011 	KASSERT(E->stage >= ENTROPY_WARM);
   2012 
   2013 	/* Verify user's authorization to perform the ioctl.  */
   2014 	switch (cmd) {
   2015 	case RNDGETENTCNT:
   2016 	case RNDGETPOOLSTAT:
   2017 	case RNDGETSRCNUM:
   2018 	case RNDGETSRCNAME:
   2019 	case RNDGETESTNUM:
   2020 	case RNDGETESTNAME:
   2021 		error = kauth_authorize_device(curlwp->l_cred,
   2022 		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
   2023 		break;
   2024 	case RNDCTL:
   2025 		error = kauth_authorize_device(curlwp->l_cred,
   2026 		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
   2027 		break;
   2028 	case RNDADDDATA:
   2029 		error = kauth_authorize_device(curlwp->l_cred,
   2030 		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
   2031 		/* Ascertain whether the user's inputs should be counted.  */
   2032 		if (kauth_authorize_device(curlwp->l_cred,
   2033 			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
   2034 			NULL, NULL, NULL, NULL) == 0)
   2035 			privileged = true;
   2036 		break;
   2037 	default: {
   2038 		/*
   2039 		 * XXX Hack to avoid changing module ABI so this can be
   2040 		 * pulled up.  Later, we can just remove the argument.
   2041 		 */
   2042 		static const struct fileops fops = {
   2043 			.fo_ioctl = rnd_system_ioctl,
   2044 		};
   2045 		struct file f = {
   2046 			.f_ops = &fops,
   2047 		};
   2048 		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
   2049 		    enosys(), error);
   2050 #if defined(_LP64)
   2051 		if (error == ENOSYS)
   2052 			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
   2053 			    enosys(), error);
   2054 #endif
   2055 		if (error == ENOSYS)
   2056 			error = ENOTTY;
   2057 		break;
   2058 	}
   2059 	}
   2060 
   2061 	/* If anything went wrong with authorization, stop here.  */
   2062 	if (error)
   2063 		return error;
   2064 
   2065 	/* Dispatch on the command.  */
   2066 	switch (cmd) {
   2067 	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
   2068 		uint32_t *countp = data;
   2069 
   2070 		mutex_enter(&E->lock);
   2071 		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
   2072 		mutex_exit(&E->lock);
   2073 
   2074 		break;
   2075 	}
   2076 	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
   2077 		rndpoolstat_t *pstat = data;
   2078 
   2079 		mutex_enter(&E->lock);
   2080 
   2081 		/* parameters */
   2082 		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
   2083 		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
   2084 		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
   2085 
   2086 		/* state */
   2087 		pstat->added = 0; /* XXX total entropy_enter count */
   2088 		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
   2089 		pstat->removed = 0; /* XXX total entropy_extract count */
   2090 		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
   2091 		pstat->generated = 0; /* XXX bits of data...fabricated? */
   2092 
   2093 		mutex_exit(&E->lock);
   2094 		break;
   2095 	}
   2096 	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
   2097 		rndstat_t *stat = data;
   2098 		uint32_t start = 0, i = 0;
   2099 
   2100 		/* Skip if none requested; fail if too many requested.  */
   2101 		if (stat->count == 0)
   2102 			break;
   2103 		if (stat->count > RND_MAXSTATCOUNT)
   2104 			return EINVAL;
   2105 
   2106 		/*
   2107 		 * Under the lock, find the first one, copy out as many
   2108 		 * as requested, and report how many we copied out.
   2109 		 */
   2110 		mutex_enter(&E->lock);
   2111 		error = rnd_lock_sources();
   2112 		if (error) {
   2113 			mutex_exit(&E->lock);
   2114 			return error;
   2115 		}
   2116 		LIST_FOREACH(rs, &E->sources, list) {
   2117 			if (start++ == stat->start)
   2118 				break;
   2119 		}
   2120 		while (i < stat->count && rs != NULL) {
   2121 			mutex_exit(&E->lock);
   2122 			rndsource_to_user(rs, &stat->source[i++]);
   2123 			mutex_enter(&E->lock);
   2124 			rs = LIST_NEXT(rs, list);
   2125 		}
   2126 		KASSERT(i <= stat->count);
   2127 		stat->count = i;
   2128 		rnd_unlock_sources();
   2129 		mutex_exit(&E->lock);
   2130 		break;
   2131 	}
   2132 	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
   2133 		rndstat_est_t *estat = data;
   2134 		uint32_t start = 0, i = 0;
   2135 
   2136 		/* Skip if none requested; fail if too many requested.  */
   2137 		if (estat->count == 0)
   2138 			break;
   2139 		if (estat->count > RND_MAXSTATCOUNT)
   2140 			return EINVAL;
   2141 
   2142 		/*
   2143 		 * Under the lock, find the first one, copy out as many
   2144 		 * as requested, and report how many we copied out.
   2145 		 */
   2146 		mutex_enter(&E->lock);
   2147 		error = rnd_lock_sources();
   2148 		if (error) {
   2149 			mutex_exit(&E->lock);
   2150 			return error;
   2151 		}
   2152 		LIST_FOREACH(rs, &E->sources, list) {
   2153 			if (start++ == estat->start)
   2154 				break;
   2155 		}
   2156 		while (i < estat->count && rs != NULL) {
   2157 			mutex_exit(&E->lock);
   2158 			rndsource_to_user_est(rs, &estat->source[i++]);
   2159 			mutex_enter(&E->lock);
   2160 			rs = LIST_NEXT(rs, list);
   2161 		}
   2162 		KASSERT(i <= estat->count);
   2163 		estat->count = i;
   2164 		rnd_unlock_sources();
   2165 		mutex_exit(&E->lock);
   2166 		break;
   2167 	}
   2168 	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
   2169 		rndstat_name_t *nstat = data;
   2170 		const size_t n = sizeof(rs->name);
   2171 
   2172 		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
   2173 
   2174 		/*
   2175 		 * Under the lock, search by name.  If found, copy it
   2176 		 * out; if not found, fail with ENOENT.
   2177 		 */
   2178 		mutex_enter(&E->lock);
   2179 		error = rnd_lock_sources();
   2180 		if (error) {
   2181 			mutex_exit(&E->lock);
   2182 			return error;
   2183 		}
   2184 		LIST_FOREACH(rs, &E->sources, list) {
   2185 			if (strncmp(rs->name, nstat->name, n) == 0)
   2186 				break;
   2187 		}
   2188 		if (rs != NULL) {
   2189 			mutex_exit(&E->lock);
   2190 			rndsource_to_user(rs, &nstat->source);
   2191 			mutex_enter(&E->lock);
   2192 		} else {
   2193 			error = ENOENT;
   2194 		}
   2195 		rnd_unlock_sources();
   2196 		mutex_exit(&E->lock);
   2197 		break;
   2198 	}
   2199 	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
   2200 		rndstat_est_name_t *enstat = data;
   2201 		const size_t n = sizeof(rs->name);
   2202 
   2203 		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
   2204 
   2205 		/*
   2206 		 * Under the lock, search by name.  If found, copy it
   2207 		 * out; if not found, fail with ENOENT.
   2208 		 */
   2209 		mutex_enter(&E->lock);
   2210 		error = rnd_lock_sources();
   2211 		if (error) {
   2212 			mutex_exit(&E->lock);
   2213 			return error;
   2214 		}
   2215 		LIST_FOREACH(rs, &E->sources, list) {
   2216 			if (strncmp(rs->name, enstat->name, n) == 0)
   2217 				break;
   2218 		}
   2219 		if (rs != NULL) {
   2220 			mutex_exit(&E->lock);
   2221 			rndsource_to_user_est(rs, &enstat->source);
   2222 			mutex_enter(&E->lock);
   2223 		} else {
   2224 			error = ENOENT;
   2225 		}
   2226 		rnd_unlock_sources();
   2227 		mutex_exit(&E->lock);
   2228 		break;
   2229 	}
   2230 	case RNDCTL: {		/* Modify entropy source flags.  */
   2231 		rndctl_t *rndctl = data;
   2232 		const size_t n = sizeof(rs->name);
   2233 		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
   2234 		uint32_t flags;
   2235 		bool reset = false, request = false;
   2236 
   2237 		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
   2238 
    2239 		/* Whitelist the flags that the user can change.  */
   2240 		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
   2241 
   2242 		/*
   2243 		 * For each matching rndsource, either by type if
   2244 		 * specified or by name if not, set the masked flags.
   2245 		 */
   2246 		mutex_enter(&E->lock);
   2247 		LIST_FOREACH(rs, &E->sources, list) {
   2248 			if (rndctl->type != 0xff) {
   2249 				if (rs->type != rndctl->type)
   2250 					continue;
   2251 			} else {
   2252 				if (strncmp(rs->name, rndctl->name, n) != 0)
   2253 					continue;
   2254 			}
   2255 			flags = rs->flags & ~rndctl->mask;
   2256 			flags |= rndctl->flags & rndctl->mask;
   2257 			if ((rs->flags & resetflags) == 0 &&
   2258 			    (flags & resetflags) != 0)
   2259 				reset = true;
   2260 			if ((rs->flags ^ flags) & resetflags)
   2261 				request = true;
   2262 			atomic_store_relaxed(&rs->flags, flags);
   2263 		}
   2264 		mutex_exit(&E->lock);
   2265 
   2266 		/*
   2267 		 * If we disabled estimation or collection, nix all the
   2268 		 * pending entropy and set needed to the maximum.
   2269 		 */
   2270 		if (reset) {
   2271 			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
   2272 			mutex_enter(&E->lock);
   2273 			E->pending = 0;
   2274 			atomic_store_relaxed(&E->needed,
   2275 			    ENTROPY_CAPACITY*NBBY);
   2276 			mutex_exit(&E->lock);
   2277 		}
   2278 
   2279 		/*
   2280 		 * If we changed any of the estimation or collection
   2281 		 * flags, request new samples from everyone -- either
   2282 		 * to make up for what we just lost, or to get new
   2283 		 * samples from what we just added.
   2284 		 */
   2285 		if (request) {
   2286 			mutex_enter(&E->lock);
   2287 			entropy_request(ENTROPY_CAPACITY);
   2288 			mutex_exit(&E->lock);
   2289 		}
   2290 		break;
   2291 	}
   2292 	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
   2293 		rnddata_t *rdata = data;
   2294 		unsigned entropybits = 0;
   2295 
   2296 		if (!atomic_load_relaxed(&entropy_collection))
   2297 			break;	/* thanks but no thanks */
   2298 		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
   2299 			return EINVAL;
   2300 
   2301 		/*
    2302 	 * This ioctl serves as the userland alternative to a
   2303 		 * bootloader-provided seed -- typically furnished by
   2304 		 * /etc/rc.d/random_seed.  We accept the user's entropy
   2305 		 * claim only if
   2306 		 *
   2307 		 * (a) the user is privileged, and
    2308 	 * (b) we have not entered a bootloader seed,
   2309 		 *
   2310 		 * under the assumption that the user may use this to
   2311 		 * load a seed from disk that we have already loaded
   2312 		 * from the bootloader, so we don't double-count it.
   2313 		 */
   2314 		if (privileged && rdata->entropy && rdata->len) {
   2315 			mutex_enter(&E->lock);
   2316 			if (!E->seeded) {
   2317 				entropybits = MIN(rdata->entropy,
   2318 				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
   2319 				E->seeded = true;
   2320 			}
   2321 			mutex_exit(&E->lock);
   2322 		}
   2323 
   2324 		/* Enter the data and consolidate entropy.  */
   2325 		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
   2326 		    entropybits);
   2327 		entropy_consolidate();
   2328 		break;
   2329 	}
   2330 	default:
   2331 		error = ENOTTY;
   2332 	}
   2333 
   2334 	/* Return any error that may have come up.  */
   2335 	return error;
   2336 }
   2337 
   2338 /* Legacy entry points */
   2339 
   2340 void
   2341 rnd_seed(void *seed, size_t len)
   2342 {
   2343 
   2344 	if (len != sizeof(rndsave_t)) {
   2345 		printf("entropy: invalid seed length: %zu,"
   2346 		    " expected sizeof(rndsave_t) = %zu\n",
   2347 		    len, sizeof(rndsave_t));
   2348 		return;
   2349 	}
   2350 	entropy_seed(seed);
   2351 }
   2352 
   2353 void
   2354 rnd_init(void)
   2355 {
   2356 
   2357 	entropy_init();
   2358 }
   2359 
   2360 void
   2361 rnd_init_softint(void)
   2362 {
   2363 
   2364 	entropy_init_late();
   2365 }
   2366 
   2367 int
   2368 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
   2369 {
   2370 
   2371 	return entropy_ioctl(cmd, data);
   2372 }
   2373