/*	$NetBSD: kern_entropy.c,v 1.24.2.2 2021/04/03 22:29:00 thorpej Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* No entropy estimation based on the sample values, which is a
 *	  contradiction in terms and a potential source of side
 *	  channels.  It is the responsibility of the driver author to
 *	  study how predictable the physical source of input can ever
 *	  be, and to furnish a lower bound on the amount of entropy it
 *	  has.
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 */
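
/*
 * Example (illustrative sketch, not part of this file): a driver
 * feeding samples in thread context through the rndsource(9) API,
 * which lands in entropy_enter below.  The softc, attach routine,
 * and the claim of one bit of entropy per sample are hypothetical;
 * a real driver must justify its own lower bound.
 *
 *	struct foo_softc {
 *		struct krndsource sc_rndsource;
 *	};
 *
 *	static void
 *	foo_attach(device_t self)
 *	{
 *		struct foo_softc *sc = device_private(self);
 *
 *		rnd_attach_source(&sc->sc_rndsource, "foo",
 *		    RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE);
 *	}
 *
 *	static void
 *	foo_sample(struct foo_softc *sc, uint32_t sample)
 *	{
 *		rnd_add_data(&sc->sc_rndsource, &sample, sizeof sample,
 *		    1);
 *	}
 */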

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.24.2.2 2021/04/03 22:29:00 thorpej Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/reboot.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct evcnt		*ec_softint_evcnt;
	struct entpool		*ec_pool;
	unsigned		ec_pending;
	bool			ec_locked;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned		rc_entropybits;
	unsigned		rc_timesamples;
	unsigned		rc_datasamples;
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	needed;		/* (A) needed globally */
	unsigned	pending;	/* (A) pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
	enum entropy_stage {
		ENTROPY_COLD = 0, /* single-threaded */
		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
	}		stage;
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.  */
	.needed = ENTROPY_CAPACITY*NBBY,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
	.stage = ENTROPY_COLD,
};

#define	E	(&entropy_global)	/* declutter */

/* Read-mostly globals */
static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
static void		*entropy_sih __read_mostly; /* softint handler */
static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */

int rnd_initial_entropy __read_mostly; /* XXX legacy */

static struct krndsource seed_rndsource __read_mostly;

/*
 * Event counters
 *
 *	Must be careful with adding these because they can serve as
 *	side channels.
 */
static struct evcnt entropy_discretionary_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
static struct evcnt entropy_immediate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
static struct evcnt entropy_partial_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
static struct evcnt entropy_consolidate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
static struct evcnt entropy_extract_intr_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr");
EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt);
static struct evcnt entropy_extract_fail_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
static struct evcnt entropy_request_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
EVCNT_ATTACH_STATIC(entropy_request_evcnt);
static struct evcnt entropy_deplete_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
static struct evcnt entropy_notify_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
EVCNT_ATTACH_STATIC(entropy_notify_evcnt);

/* Sysctl knobs */
static bool	entropy_collection = 1;
static bool	entropy_depletion = 0; /* Silly!  */

static const struct sysctlnode	*entropy_sysctlroot;
static struct sysctllog		*entropy_sysctllog;

/* Forward declarations */
static void	entropy_init_cpu(void *, void *, struct cpu_info *);
static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
static void	entropy_account_cpu(struct entropy_cpu *);
static void	entropy_enter(const void *, size_t, unsigned);
static bool	entropy_enter_intr(const void *, size_t, unsigned);
static void	entropy_softintr(void *);
static void	entropy_thread(void *);
static uint32_t	entropy_pending(void);
static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
static void	entropy_do_consolidate(void);
static void	entropy_consolidate_xc(void *, void *);
static void	entropy_notify(void);
static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
static void	filt_entropy_read_detach(struct knote *);
static int	filt_entropy_read_event(struct knote *, long);
static void	entropy_request(size_t);
static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
		    uint32_t, uint32_t);
static unsigned	rndsource_entropybits(struct krndsource *);
static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
static void	rndsource_to_user(struct krndsource *, rndsource_t *);
static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);

/*
 * entropy_timer()
 *
 *	Cycle counter, time counter, or anything that changes a wee bit
 *	unpredictably.
 */
static inline uint32_t
entropy_timer(void)
{
	struct bintime bt;
	uint32_t v;

	/* If we have a CPU cycle counter, use the low 32 bits.  */
#ifdef __HAVE_CPU_COUNTER
	if (__predict_true(cpu_hascounter()))
		return cpu_counter32();
#endif	/* __HAVE_CPU_COUNTER */

	/* If we're cold, tough.  Can't binuptime while cold.  */
	if (__predict_false(cold))
		return 0;

	/* Fold the 128 bits of binuptime into 32 bits.  */
	binuptime(&bt);
	v = bt.frac;
	v ^= bt.frac >> 32;
	v ^= bt.sec;
	v ^= bt.sec >> 32;
	return v;
}

static void
attach_seed_rndsource(void)
{

	/*
	 * First called no later than entropy_init, while we are still
	 * single-threaded, so no need for RUN_ONCE.
	 */
	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
		return;
	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
	E->seed_rndsource = true;
}

/*
 * entropy_init()
 *
 *	Initialize the entropy subsystem.  Panic on failure.
 *
 *	Requires percpu(9) and sysctl(9) to be initialized.
 */
static void
entropy_init(void)
{
	uint32_t extra[2];
	struct krndsource *rs;
	unsigned i = 0;

	KASSERT(E->stage == ENTROPY_COLD);

	/* Grab some cycle counts early at boot.  */
	extra[i++] = entropy_timer();

	/* Run the entropy pool cryptography self-test.  */
	if (entpool_selftest() == -1)
		panic("entropy pool crypto self-test failed");

	/* Create the sysctl directory.  */
	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
	    SYSCTL_DESCR("Entropy (random number sources) options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	/* Create the sysctl knobs.  */
	/* XXX These shouldn't be writable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
	    SYSCTL_DESCR("Trigger entropy consolidation now"),
	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	/* XXX These should maybe not be readable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
	    NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
	    NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "epoch", SYSCTL_DESCR("Entropy epoch"),
	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);

	/* Initialize the global state for multithreaded operation.  */
	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&E->cv, "entropy");
	selinit(&E->selq);
	cv_init(&E->sourcelock_cv, "entsrclock");

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Note if the bootloader didn't provide a seed.  */
	if (!E->seeded)
		aprint_debug("entropy: no seed from bootloader\n");

	/* Allocate the per-CPU records for all early entropy sources.  */
	LIST_FOREACH(rs, &E->sources, list)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));

	/* Enter the boot cycle count to get started.  */
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, 0);
	explicit_memset(extra, 0, sizeof extra);

	/* We are now ready for multi-threaded operation.  */
	E->stage = ENTROPY_WARM;
}
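
/*
 * Example (operator usage, illustrative): the sysctl knobs created
 * above can be inspected or poked from userland, e.g.:
 *
 *	sysctl kern.entropy.needed
 *	sysctl kern.entropy.epoch
 *	sysctl -w kern.entropy.consolidate=1
 *	sysctl -w kern.entropy.gather=1
 */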

/*
 * entropy_init_late()
 *
 *	Late initialization.  Panic on failure.
 *
 *	Requires CPUs to have been detected and LWPs to have started.
 */
static void
entropy_init_late(void)
{
	int error;

	KASSERT(E->stage == ENTROPY_WARM);

	/* Allocate and initialize the per-CPU state.  */
	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
	    entropy_init_cpu, entropy_fini_cpu, NULL);

	/*
	 * Establish the softint at the highest softint priority level.
	 * Must happen after CPU detection.
	 */
	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    &entropy_softintr, NULL);
	if (entropy_sih == NULL)
		panic("unable to establish entropy softint");

	/*
	 * Create the entropy housekeeping thread.  Must happen after
	 * lwpinit.
	 */
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
	    entropy_thread, NULL, &entropy_lwp, "entbutler");
	if (error)
		panic("unable to create entropy housekeeping thread: %d",
		    error);

	/*
	 * Wait until the per-CPU initialization has hit all CPUs
	 * before proceeding to mark the entropy system hot.
	 */
	xc_barrier(XC_HIGHPRI);
	E->stage = ENTROPY_HOT;
}

/*
 * entropy_init_cpu(ptr, cookie, ci)
 *
 *	percpu(9) constructor for per-CPU entropy pool.
 */
static void
entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;

	ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt),
	    KM_SLEEP);
	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
	ec->ec_pending = 0;
	ec->ec_locked = false;

	evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
	    ci->ci_cpuname, "entropy softint");
}

/*
 * entropy_fini_cpu(ptr, cookie, ci)
 *
 *	percpu(9) destructor for per-CPU entropy pool.
 */
static void
entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;

	/*
	 * Zero any lingering data.  Disclosure of the per-CPU pool
	 * shouldn't retroactively affect the security of any keys
	 * generated, because entpool(9) erases whatever we have just
	 * drawn out of any pool, but better safe than sorry.
	 */
	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));

	evcnt_detach(ec->ec_softint_evcnt);

	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
	kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt));
}

/*
 * entropy_seed(seed)
 *
 *	Seed the entropy pool with seed.  Meant to be called as early
 *	as possible by the bootloader; may be called before or after
 *	entropy_init.  Must be called before system reaches userland.
 *	Must be called in thread or soft interrupt context, not in hard
 *	interrupt context.  Must be called at most once.
 *
 *	Overwrites the seed in place.  Caller may then free the memory.
 */
static void
entropy_seed(rndsave_t *seed)
{
	SHA1_CTX ctx;
	uint8_t digest[SHA1_DIGEST_LENGTH];
	bool seeded;

	/*
	 * Verify the checksum.  If the checksum fails, take the data
	 * but ignore the entropy estimate -- the file may have been
	 * incompletely written with garbage, which is harmless to add
	 * but may not be as unpredictable as alleged.
	 */
	SHA1Init(&ctx);
	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
	SHA1Update(&ctx, seed->data, sizeof(seed->data));
	SHA1Final(digest, &ctx);
	CTASSERT(sizeof(seed->digest) == sizeof(digest));
	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
		printf("entropy: invalid seed checksum\n");
		seed->entropy = 0;
	}
	explicit_memset(&ctx, 0, sizeof ctx);
	explicit_memset(digest, 0, sizeof digest);

	/*
	 * If the entropy is insensibly large, try byte-swapping.
	 * Otherwise assume the file is corrupted and act as though it
	 * has zero entropy.
	 */
	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
		seed->entropy = bswap32(seed->entropy);
		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
			seed->entropy = 0;
	}

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Test and set E->seeded.  */
	if (E->stage >= ENTROPY_WARM)
		mutex_enter(&E->lock);
	seeded = E->seeded;
	E->seeded = (seed->entropy > 0);
	if (E->stage >= ENTROPY_WARM)
		mutex_exit(&E->lock);

	/*
	 * If we've been seeded, we may be re-entering the same seed
	 * (e.g., bootloader vs module init, or something).  No harm in
	 * entering it twice, but it contributes no additional entropy.
	 */
	if (seeded) {
		printf("entropy: double-seeded by bootloader\n");
		seed->entropy = 0;
	} else {
		printf("entropy: entering seed from bootloader"
		    " with %u bits of entropy\n", (unsigned)seed->entropy);
	}

	/* Enter it into the pool and promptly zero it.  */
	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
	    seed->entropy);
	explicit_memset(seed, 0, sizeof(*seed));
}
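
/*
 * Example (illustrative sketch, not part of this file): how a
 * bootloader or rndctl-like tool might produce the checksum that
 * entropy_seed verifies above -- SHA-1 over the entropy estimate
 * followed by the seed data.  The function name is hypothetical.
 *
 *	static void
 *	seed_checksum(rndsave_t *seed)
 *	{
 *		SHA1_CTX ctx;
 *
 *		SHA1Init(&ctx);
 *		SHA1Update(&ctx, (const void *)&seed->entropy,
 *		    sizeof(seed->entropy));
 *		SHA1Update(&ctx, seed->data, sizeof(seed->data));
 *		SHA1Final(seed->digest, &ctx);
 *	}
 */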

/*
 * entropy_bootrequest()
 *
 *	Request entropy from all sources at boot, once config is
 *	complete and interrupts are running.
 */
void
entropy_bootrequest(void)
{

	KASSERT(E->stage >= ENTROPY_WARM);

	/*
	 * Request enough to satisfy the maximum entropy shortage.
	 * This is harmless overkill if the bootloader provided a seed.
	 */
	mutex_enter(&E->lock);
	entropy_request(ENTROPY_CAPACITY);
	mutex_exit(&E->lock);
}

/*
 * entropy_epoch()
 *
 *	Returns the current entropy epoch.  If this changes, you should
 *	reseed.  A value of -1 means the system has not yet reached
 *	full entropy or been explicitly consolidated; the epoch never
 *	reverts to -1.  It is never zero, so you can always use zero as
 *	an uninitialized sentinel value meaning `reseed ASAP'.
 *
 *	Usage model:
 *
 *		struct foo {
 *			struct crypto_prng prng;
 *			unsigned epoch;
 *		} *foo;
 *
 *		unsigned epoch = entropy_epoch();
 *		if (__predict_false(epoch != foo->epoch)) {
 *			uint8_t seed[32];
 *			if (entropy_extract(seed, sizeof seed, 0) != 0)
 *				warn("no entropy");
 *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
 *			foo->epoch = epoch;
 *		}
 */
unsigned
entropy_epoch(void)
{

	/*
	 * Unsigned int, so no need for seqlock for an atomic read, but
	 * make sure we read it afresh each time.
	 */
	return atomic_load_relaxed(&E->epoch);
}

/*
 * entropy_ready()
 *
 *	True if the entropy pool has full entropy.
 */
bool
entropy_ready(void)
{

	return atomic_load_relaxed(&E->needed) == 0;
}
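
/*
 * Example (illustrative sketch, not part of this file): a consumer
 * that blocks until full entropy before drawing key material, using
 * entropy_extract with the ENTROPY_WAIT and ENTROPY_SIG flags from
 * sys/entropy.h.  Error handling is schematic only.
 *
 *	uint8_t key[32];
 *	int error;
 *
 *	error = entropy_extract(key, sizeof key,
 *	    ENTROPY_WAIT|ENTROPY_SIG);
 *	if (error)
 *		return error;
 */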

/*
 * entropy_account_cpu(ec)
 *
 *	Consider whether to consolidate entropy into the global pool
 *	after we just added some into the current CPU's pending pool.
 *
 *	- If this CPU can provide enough entropy now, do so.
 *
 *	- If this and whatever else is available on other CPUs can
 *	  provide enough entropy, kick the consolidation thread.
 *
 *	- Otherwise, do as little as possible, except maybe consolidate
 *	  entropy at most once a minute.
 *
 *	Caller must be bound to a CPU and therefore have exclusive
 *	access to ec.  Will acquire and release the global lock.
 */
static void
entropy_account_cpu(struct entropy_cpu *ec)
{
	unsigned diff;

	KASSERT(E->stage == ENTROPY_HOT);

	/*
	 * If there's no entropy needed, and entropy has been
	 * consolidated in the last minute, do nothing.
	 */
	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
	    __predict_true((time_uptime - E->timestamp) <= 60))
		return;

	/* If there's nothing pending, stop here.  */
	if (ec->ec_pending == 0)
		return;

	/* Consider consolidation, under the lock.  */
	mutex_enter(&E->lock);
	if (E->needed != 0 && E->needed <= ec->ec_pending) {
		/*
		 * If we have not yet attained full entropy but we can
		 * now, do so.  This way we disseminate entropy
		 * promptly when it becomes available early at boot;
		 * otherwise we leave it to the entropy consolidation
		 * thread, which is rate-limited to mitigate side
		 * channels and abuse.
		 */
		uint8_t buf[ENTPOOL_CAPACITY];

		/* Transfer from the local pool to the global pool.  */
		entpool_extract(ec->ec_pool, buf, sizeof buf);
		entpool_enter(&E->pool, buf, sizeof buf);
		atomic_store_relaxed(&ec->ec_pending, 0);
		atomic_store_relaxed(&E->needed, 0);

		/* Notify waiters that we now have full entropy.  */
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	} else {
		/* Record how much we can add to the global pool.  */
		diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending);
		E->pending += diff;
		atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff);

		/*
		 * This should have made a difference unless we were
		 * already saturated.
		 */
		KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY);
		KASSERT(E->pending);

		if (E->needed <= E->pending) {
			/*
			 * Enough entropy between all the per-CPU
			 * pools.  Wake up the housekeeping thread.
			 *
			 * If we don't need any entropy, this doesn't
			 * mean much, but it is the only time we ever
			 * gather additional entropy in case the
			 * accounting has been overly optimistic.  This
			 * happens at most once a minute, so there's
			 * negligible performance cost.
			 */
			E->consolidate = true;
			cv_broadcast(&E->cv);
			if (E->needed == 0)
				entropy_discretionary_evcnt.ev_count++;
		} else {
			/* Can't get full entropy.  Keep gathering.  */
			entropy_partial_evcnt.ev_count++;
		}
	}
	mutex_exit(&E->lock);
}

/*
 * entropy_enter_early(buf, len, nbits)
 *
 *	Do entropy bookkeeping globally, before we have established
 *	per-CPU pools.  Enter directly into the global pool in the hope
 *	that we enter enough before the first entropy_extract to thwart
 *	iterative-guessing attacks; entropy_extract will warn if not.
 */
static void
entropy_enter_early(const void *buf, size_t len, unsigned nbits)
{
	bool notify = false;

	if (E->stage >= ENTROPY_WARM)
		mutex_enter(&E->lock);

	/* Enter it into the pool.  */
	entpool_enter(&E->pool, buf, len);

	/*
	 * Decide whether to notify reseed -- we will do so if either:
	 * (a) we transition from partial entropy to full entropy, or
	 * (b) we get a batch of full entropy all at once.
	 */
	notify |= (E->needed && E->needed <= nbits);
	notify |= (nbits >= ENTROPY_CAPACITY*NBBY);

	/* Subtract from the needed count and notify if appropriate.  */
	E->needed -= MIN(E->needed, nbits);
	if (notify) {
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	}

	if (E->stage >= ENTROPY_WARM)
		mutex_exit(&E->lock);
}

/*
 * entropy_enter(buf, len, nbits)
 *
 *	Enter len bytes of data from buf into the system's entropy
 *	pool, stirring as necessary when the internal buffer fills up.
 *	nbits is a lower bound on the number of bits of entropy in the
 *	process that led to this sample.
 */
static void
entropy_enter(const void *buf, size_t len, unsigned nbits)
{
	struct entropy_cpu *ec;
	uint32_t pending;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "use entropy_enter_intr from interrupt context");
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/* If it's too early after boot, just use entropy_enter_early.  */
	if (__predict_false(E->stage < ENTROPY_HOT)) {
		entropy_enter_early(buf, len, nbits);
		return;
	}

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Enter into the per-CPU pool.  */
	entpool_enter(ec->ec_pool, buf, len);

	/* Count up what we can add.  */
	pending = ec->ec_pending;
	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
	atomic_store_relaxed(&ec->ec_pending, pending);

	/* Consolidate globally if appropriate based on what we added.  */
	entropy_account_cpu(ec);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}

/*
 * entropy_enter_intr(buf, len, nbits)
 *
 *	Enter up to len bytes of data from buf into the system's
 *	entropy pool without stirring.  nbits is a lower bound on the
 *	number of bits of entropy in the process that led to this
 *	sample.  If the sample could be entered completely, assume
 *	nbits of entropy pending; otherwise assume none, since we don't
 *	know whether some parts of the sample are constant, for
 *	instance.  Schedule a softint to stir the entropy pool if
 *	needed.  Return true if used fully, false if truncated at all.
 *
 *	Using this in thread context will work, but you might as well
 *	use entropy_enter in that case.
 */
static bool
entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
{
	struct entropy_cpu *ec;
	bool fullyused = false;
	uint32_t pending;

	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/* If it's too early after boot, just use entropy_enter_early.  */
	if (__predict_false(E->stage < ENTROPY_HOT)) {
		entropy_enter_early(buf, len, nbits);
		return true;
	}

	/*
	 * Acquire the per-CPU state.  If someone is in the middle of
	 * using it, drop the sample.  Otherwise, take the lock so that
	 * higher-priority interrupts will drop their samples.
	 */
	ec = percpu_getref(entropy_percpu);
	if (ec->ec_locked)
		goto out0;
	ec->ec_locked = true;
	__insn_barrier();

	/*
	 * Enter as much as we can into the per-CPU pool.  If it was
	 * truncated, schedule a softint to stir the pool and stop.
	 */
	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
		softint_schedule(entropy_sih);
		goto out1;
	}
	fullyused = true;

	/* Count up what we can contribute.  */
	pending = ec->ec_pending;
	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
	atomic_store_relaxed(&ec->ec_pending, pending);

	/* Schedule a softint if we added anything and it matters.  */
	if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
		atomic_load_relaxed(&entropy_depletion)) &&
	    nbits != 0)
		softint_schedule(entropy_sih);

out1:	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
out0:	percpu_putref(entropy_percpu);

	return fullyused;
}
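
/*
 * Example (illustrative sketch, not part of this file): a device
 * interrupt handler feeding samples through the legacy rnd(9) API,
 * which routes here via entropy_enter_intr.  The softc and handler
 * are hypothetical; rnd_add_uint32 claims no entropy for the sample
 * itself and merely contributes the value and a timestamp.
 *
 *	static int
 *	foo_intr(void *cookie)
 *	{
 *		struct foo_softc *sc = cookie;
 *		uint32_t status;
 *
 *		status = foo_read_status(sc);
 *		rnd_add_uint32(&sc->sc_rndsource, status);
 *		return 1;
 *	}
 */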
    893       1.1  riastrad 
    894       1.1  riastrad /*
    895       1.1  riastrad  * entropy_softintr(cookie)
    896       1.1  riastrad  *
    897       1.1  riastrad  *	Soft interrupt handler for entering entropy.  Takes care of
    898       1.1  riastrad  *	stirring the local CPU's entropy pool if it filled up during
    899       1.1  riastrad  *	hard interrupts, and promptly crediting entropy from the local
    900       1.1  riastrad  *	CPU's entropy pool to the global entropy pool if needed.
    901       1.1  riastrad  */
    902       1.1  riastrad static void
    903       1.1  riastrad entropy_softintr(void *cookie)
    904       1.1  riastrad {
    905       1.1  riastrad 	struct entropy_cpu *ec;
    906       1.1  riastrad 
    907       1.1  riastrad 	/*
    908       1.1  riastrad 	 * Acquire the per-CPU state.  Other users can lock this only
    909       1.1  riastrad 	 * while soft interrupts are blocked.  Cause hard interrupts to
    910       1.1  riastrad 	 * drop samples on the floor.
    911       1.1  riastrad 	 */
    912       1.1  riastrad 	ec = percpu_getref(entropy_percpu);
    913       1.1  riastrad 	KASSERT(!ec->ec_locked);
    914       1.1  riastrad 	ec->ec_locked = true;
    915       1.1  riastrad 	__insn_barrier();
    916       1.1  riastrad 
    917       1.1  riastrad 	/* Count statistics.  */
    918       1.1  riastrad 	ec->ec_softint_evcnt->ev_count++;
    919       1.1  riastrad 
    920       1.1  riastrad 	/* Stir the pool if necessary.  */
    921       1.1  riastrad 	entpool_stir(ec->ec_pool);
    922       1.1  riastrad 
    923       1.1  riastrad 	/* Consolidate globally if appropriate based on what we added.  */
    924       1.1  riastrad 	entropy_account_cpu(ec);
    925       1.1  riastrad 
    926       1.1  riastrad 	/* Release the per-CPU state.  */
    927       1.1  riastrad 	KASSERT(ec->ec_locked);
    928       1.1  riastrad 	__insn_barrier();
    929       1.1  riastrad 	ec->ec_locked = false;
    930       1.1  riastrad 	percpu_putref(entropy_percpu);
    931       1.1  riastrad }
    932       1.1  riastrad 
    933       1.1  riastrad /*
    934       1.1  riastrad  * entropy_thread(cookie)
    935       1.1  riastrad  *
    936       1.1  riastrad  *	Handle any asynchronous entropy housekeeping.
    937       1.1  riastrad  */
    938       1.1  riastrad static void
    939       1.1  riastrad entropy_thread(void *cookie)
    940       1.1  riastrad {
    941       1.3  riastrad 	bool consolidate;
    942       1.1  riastrad 
    943       1.1  riastrad 	for (;;) {
    944       1.1  riastrad 		/*
    945       1.3  riastrad 		 * Wait until there's full entropy somewhere among the
    946       1.3  riastrad 		 * CPUs, as confirmed at most once per minute, or
    947       1.3  riastrad 		 * someone wants to consolidate.
    948       1.1  riastrad 		 */
    949       1.3  riastrad 		if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
    950       1.3  riastrad 			consolidate = true;
    951       1.3  riastrad 		} else {
    952       1.3  riastrad 			mutex_enter(&E->lock);
    953       1.3  riastrad 			if (!E->consolidate)
    954       1.3  riastrad 				cv_timedwait(&E->cv, &E->lock, 60*hz);
    955       1.3  riastrad 			consolidate = E->consolidate;
    956       1.3  riastrad 			E->consolidate = false;
    957       1.3  riastrad 			mutex_exit(&E->lock);
    958       1.1  riastrad 		}
    959       1.1  riastrad 
    960       1.3  riastrad 		if (consolidate) {
    961       1.3  riastrad 			/* Do it.  */
    962      1.13  riastrad 			entropy_do_consolidate();
    963       1.1  riastrad 
    964       1.3  riastrad 			/* Mitigate abuse.  */
    965       1.3  riastrad 			kpause("entropy", false, hz, NULL);
    966       1.3  riastrad 		}
    967       1.1  riastrad 	}
    968       1.1  riastrad }
    969       1.1  riastrad 
    970       1.1  riastrad /*
    971       1.1  riastrad  * entropy_pending()
    972       1.1  riastrad  *
    973       1.1  riastrad  *	Count up the amount of entropy pending on other CPUs.
    974       1.1  riastrad  */
    975       1.1  riastrad static uint32_t
    976       1.1  riastrad entropy_pending(void)
    977       1.1  riastrad {
    978       1.1  riastrad 	uint32_t pending = 0;
    979       1.1  riastrad 
    980       1.1  riastrad 	percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
    981       1.1  riastrad 	return pending;
    982       1.1  riastrad }
    983       1.1  riastrad 
    984       1.1  riastrad static void
    985       1.1  riastrad entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
    986       1.1  riastrad {
    987       1.1  riastrad 	struct entropy_cpu *ec = ptr;
    988       1.1  riastrad 	uint32_t *pendingp = cookie;
    989       1.1  riastrad 	uint32_t cpu_pending;
    990       1.1  riastrad 
    991       1.1  riastrad 	cpu_pending = atomic_load_relaxed(&ec->ec_pending);
    992       1.1  riastrad 	*pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
    993       1.1  riastrad }
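
/*
 * Worked example (editor's note): the MIN clamp above saturates the
 * running sum at ENTROPY_CAPACITY*NBBY.  With a 256-bit capacity, two
 * CPUs each reporting 200 bits pending sum to 256, not 400 -- the
 * global pool could not absorb more than its capacity anyway, and
 * entropy_thread only compares the total against that threshold.
 */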
    994       1.1  riastrad 
    995       1.1  riastrad /*
    996      1.13  riastrad  * entropy_do_consolidate()
    997       1.1  riastrad  *
    998       1.1  riastrad  *	Issue a cross-call to gather entropy on all CPUs and advance
    999       1.1  riastrad  *	the entropy epoch.
   1000       1.1  riastrad  */
   1001       1.1  riastrad static void
   1002      1.13  riastrad entropy_do_consolidate(void)
   1003       1.1  riastrad {
   1004       1.1  riastrad 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
   1005       1.1  riastrad 	static struct timeval lasttime; /* serialized by E->lock */
   1006      1.19  riastrad 	struct entpool pool;
   1007      1.19  riastrad 	uint8_t buf[ENTPOOL_CAPACITY];
   1008       1.1  riastrad 	unsigned diff;
   1009       1.1  riastrad 	uint64_t ticket;
   1010       1.1  riastrad 
   1011      1.19  riastrad 	/* Gather entropy on all CPUs into a temporary pool.  */
   1012      1.19  riastrad 	memset(&pool, 0, sizeof pool);
   1013      1.19  riastrad 	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
   1014       1.1  riastrad 	xc_wait(ticket);
   1015       1.1  riastrad 
   1016       1.1  riastrad 	/* Acquire the lock to notify waiters.  */
   1017       1.1  riastrad 	mutex_enter(&E->lock);
   1018       1.1  riastrad 
   1019       1.1  riastrad 	/* Count another consolidation.  */
   1020       1.1  riastrad 	entropy_consolidate_evcnt.ev_count++;
   1021       1.1  riastrad 
   1022       1.1  riastrad 	/* Note when we last consolidated, i.e. now.  */
   1023       1.1  riastrad 	E->timestamp = time_uptime;
   1024       1.1  riastrad 
   1025      1.19  riastrad 	/* Mix what we gathered into the global pool.  */
   1026      1.19  riastrad 	entpool_extract(&pool, buf, sizeof buf);
   1027      1.19  riastrad 	entpool_enter(&E->pool, buf, sizeof buf);
   1028      1.19  riastrad 	explicit_memset(&pool, 0, sizeof pool);
   1029      1.19  riastrad 
   1030       1.1  riastrad 	/* Count the entropy that was gathered.  */
   1031       1.1  riastrad 	diff = MIN(E->needed, E->pending);
   1032       1.1  riastrad 	atomic_store_relaxed(&E->needed, E->needed - diff);
   1033       1.1  riastrad 	E->pending -= diff;
   1034       1.1  riastrad 	if (__predict_false(E->needed > 0)) {
   1035  1.24.2.2   thorpej 		if (ratecheck(&lasttime, &interval) &&
   1036  1.24.2.2   thorpej 		    (boothowto & AB_DEBUG) != 0) {
   1037       1.1  riastrad 			printf("entropy: WARNING:"
   1038       1.1  riastrad 			    " consolidating less than full entropy\n");
   1039  1.24.2.2   thorpej 		}
   1040       1.1  riastrad 	}
   1041       1.1  riastrad 
   1042       1.1  riastrad 	/* Advance the epoch and notify waiters.  */
   1043       1.1  riastrad 	entropy_notify();
   1044       1.1  riastrad 
   1045       1.1  riastrad 	/* Release the lock.  */
   1046       1.1  riastrad 	mutex_exit(&E->lock);
   1047       1.1  riastrad }
   1048       1.1  riastrad 
   1049       1.1  riastrad /*
   1050      1.20  riastrad  * entropy_consolidate_xc(vpool, arg2)
   1051       1.1  riastrad  *
   1052       1.1  riastrad  *	Extract output from the local CPU's input pool and enter it
   1053      1.20  riastrad  *	into a temporary pool passed as vpool.
   1054       1.1  riastrad  */
   1055       1.1  riastrad static void
   1056      1.19  riastrad entropy_consolidate_xc(void *vpool, void *arg2 __unused)
   1057       1.1  riastrad {
   1058      1.19  riastrad 	struct entpool *pool = vpool;
   1059       1.1  riastrad 	struct entropy_cpu *ec;
   1060       1.1  riastrad 	uint8_t buf[ENTPOOL_CAPACITY];
   1061       1.1  riastrad 	uint32_t extra[7];
   1062       1.1  riastrad 	unsigned i = 0;
   1063       1.1  riastrad 	int s;
   1064       1.1  riastrad 
   1065       1.1  riastrad 	/* Grab CPU number and cycle counter to mix extra into the pool.  */
   1066       1.1  riastrad 	extra[i++] = cpu_number();
   1067       1.1  riastrad 	extra[i++] = entropy_timer();
   1068       1.1  riastrad 
   1069       1.1  riastrad 	/*
   1070       1.1  riastrad 	 * Acquire the per-CPU state, blocking soft interrupts and
   1071       1.1  riastrad 	 * discarding entropy in hard interrupts, so that we can
   1072       1.1  riastrad 	 * extract from the per-CPU pool.
   1073       1.1  riastrad 	 */
   1074       1.1  riastrad 	ec = percpu_getref(entropy_percpu);
   1075       1.1  riastrad 	s = splsoftserial();
   1076       1.1  riastrad 	KASSERT(!ec->ec_locked);
   1077       1.1  riastrad 	ec->ec_locked = true;
   1078       1.1  riastrad 	__insn_barrier();
   1079       1.1  riastrad 	extra[i++] = entropy_timer();
   1080       1.1  riastrad 
    1081      1.12  riastrad 	/* Extract the data and count it as no longer pending.  */
   1082       1.1  riastrad 	entpool_extract(ec->ec_pool, buf, sizeof buf);
   1083      1.12  riastrad 	atomic_store_relaxed(&ec->ec_pending, 0);
   1084       1.1  riastrad 	extra[i++] = entropy_timer();
   1085       1.1  riastrad 
   1086       1.1  riastrad 	/* Release the per-CPU state.  */
   1087       1.1  riastrad 	KASSERT(ec->ec_locked);
   1088       1.1  riastrad 	__insn_barrier();
   1089       1.1  riastrad 	ec->ec_locked = false;
   1090       1.1  riastrad 	splx(s);
   1091       1.1  riastrad 	percpu_putref(entropy_percpu);
   1092       1.1  riastrad 	extra[i++] = entropy_timer();
   1093       1.1  riastrad 
   1094       1.1  riastrad 	/*
   1095       1.1  riastrad 	 * Copy over statistics, and enter the per-CPU extract and the
   1096      1.19  riastrad 	 * extra timing into the temporary pool, under the global lock.
   1097       1.1  riastrad 	 */
   1098       1.1  riastrad 	mutex_enter(&E->lock);
   1099       1.1  riastrad 	extra[i++] = entropy_timer();
   1100      1.19  riastrad 	entpool_enter(pool, buf, sizeof buf);
   1101       1.1  riastrad 	explicit_memset(buf, 0, sizeof buf);
   1102       1.1  riastrad 	extra[i++] = entropy_timer();
   1103       1.1  riastrad 	KASSERT(i == __arraycount(extra));
   1104      1.19  riastrad 	entpool_enter(pool, extra, sizeof extra);
   1105       1.1  riastrad 	explicit_memset(extra, 0, sizeof extra);
   1106       1.1  riastrad 	mutex_exit(&E->lock);
   1107       1.1  riastrad }
   1108       1.1  riastrad 
   1109       1.1  riastrad /*
   1110       1.1  riastrad  * entropy_notify()
   1111       1.1  riastrad  *
   1112       1.1  riastrad  *	Caller just contributed entropy to the global pool.  Advance
   1113       1.1  riastrad  *	the entropy epoch and notify waiters.
   1114       1.1  riastrad  *
   1115       1.1  riastrad  *	Caller must hold the global entropy lock.  Except for the
   1116       1.1  riastrad  *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
    1117       1.1  riastrad  *	have just transitioned from partial entropy to full
   1118       1.1  riastrad  *	entropy -- E->needed should be zero now.
   1119       1.1  riastrad  */
   1120       1.1  riastrad static void
   1121       1.1  riastrad entropy_notify(void)
   1122       1.1  riastrad {
   1123      1.12  riastrad 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
   1124      1.12  riastrad 	static struct timeval lasttime; /* serialized by E->lock */
   1125       1.1  riastrad 	unsigned epoch;
   1126       1.1  riastrad 
   1127       1.1  riastrad 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1128       1.1  riastrad 
   1129       1.1  riastrad 	/*
   1130       1.1  riastrad 	 * If this is the first time, print a message to the console
   1131       1.1  riastrad 	 * that we're ready so operators can compare it to the timing
   1132       1.1  riastrad 	 * of other events.
   1133       1.1  riastrad 	 */
   1134      1.14  riastrad 	if (__predict_false(!rnd_initial_entropy) && E->needed == 0) {
   1135       1.1  riastrad 		printf("entropy: ready\n");
   1136      1.14  riastrad 		rnd_initial_entropy = 1;
   1137      1.14  riastrad 	}
   1138       1.1  riastrad 
   1139       1.1  riastrad 	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
   1140      1.12  riastrad 	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
   1141      1.12  riastrad 	    ratecheck(&lasttime, &interval)) {
   1142      1.12  riastrad 		epoch = E->epoch + 1;
   1143      1.12  riastrad 		if (epoch == 0 || epoch == (unsigned)-1)
   1144      1.12  riastrad 			epoch = 1;
   1145      1.12  riastrad 		atomic_store_relaxed(&E->epoch, epoch);
   1146      1.12  riastrad 	}
   1147       1.1  riastrad 
   1148       1.1  riastrad 	/* Notify waiters.  */
   1149       1.1  riastrad 	if (E->stage >= ENTROPY_WARM) {
   1150       1.1  riastrad 		cv_broadcast(&E->cv);
   1151       1.1  riastrad 		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
   1152       1.1  riastrad 	}
   1153       1.1  riastrad 
   1154       1.1  riastrad 	/* Count another notification.  */
   1155       1.1  riastrad 	entropy_notify_evcnt.ev_count++;
   1156       1.1  riastrad }
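
/*
 * Concretely (editor's note): E->epoch starts at (unsigned)-1,
 * meaning entropy has never been consolidated.  The first
 * notification advances it to 1, then 2, 3, ...; on wraparound both
 * 0 and (unsigned)-1 are skipped, so those two values remain
 * distinguished forever and consumers can cache the epoch to detect
 * reseeds.
 */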
   1157       1.1  riastrad 
   1158       1.1  riastrad /*
   1159      1.13  riastrad  * entropy_consolidate()
   1160      1.13  riastrad  *
   1161      1.13  riastrad  *	Trigger entropy consolidation and wait for it to complete.
   1162      1.13  riastrad  *
   1163      1.13  riastrad  *	This should be used sparingly, not periodically -- requiring
   1164      1.13  riastrad  *	conscious intervention by the operator or a clear policy
   1165      1.13  riastrad  *	decision.  Otherwise, the kernel will automatically consolidate
   1166      1.13  riastrad  *	when enough entropy has been gathered into per-CPU pools to
   1167      1.13  riastrad  *	transition to full entropy.
   1168      1.13  riastrad  */
   1169      1.13  riastrad void
   1170      1.13  riastrad entropy_consolidate(void)
   1171      1.13  riastrad {
   1172      1.13  riastrad 	uint64_t ticket;
   1173      1.13  riastrad 	int error;
   1174      1.13  riastrad 
   1175      1.13  riastrad 	KASSERT(E->stage == ENTROPY_HOT);
   1176      1.13  riastrad 
   1177      1.13  riastrad 	mutex_enter(&E->lock);
   1178      1.13  riastrad 	ticket = entropy_consolidate_evcnt.ev_count;
   1179      1.13  riastrad 	E->consolidate = true;
   1180      1.13  riastrad 	cv_broadcast(&E->cv);
   1181      1.13  riastrad 	while (ticket == entropy_consolidate_evcnt.ev_count) {
   1182      1.13  riastrad 		error = cv_wait_sig(&E->cv, &E->lock);
   1183      1.13  riastrad 		if (error)
   1184      1.13  riastrad 			break;
   1185      1.13  riastrad 	}
   1186      1.13  riastrad 	mutex_exit(&E->lock);
   1187      1.13  riastrad }
   1188      1.13  riastrad 
   1189      1.13  riastrad /*
   1190       1.1  riastrad  * sysctl -w kern.entropy.consolidate=1
   1191       1.1  riastrad  *
   1192       1.1  riastrad  *	Trigger entropy consolidation and wait for it to complete.
   1193      1.13  riastrad  *	Writable only by superuser.  This, writing to /dev/random, and
   1194      1.13  riastrad  *	ioctl(RNDADDDATA) are the only ways for the system to
   1195      1.13  riastrad  *	consolidate entropy if the operator knows something the kernel
   1196      1.13  riastrad  *	doesn't about how unpredictable the pending entropy pools are.
   1197       1.1  riastrad  */
   1198       1.1  riastrad static int
   1199       1.1  riastrad sysctl_entropy_consolidate(SYSCTLFN_ARGS)
   1200       1.1  riastrad {
   1201       1.1  riastrad 	struct sysctlnode node = *rnode;
   1202       1.1  riastrad 	int arg;
   1203       1.1  riastrad 	int error;
   1204       1.1  riastrad 
   1205       1.1  riastrad 	KASSERT(E->stage == ENTROPY_HOT);
   1206       1.1  riastrad 
   1207       1.1  riastrad 	node.sysctl_data = &arg;
   1208       1.1  riastrad 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   1209       1.1  riastrad 	if (error || newp == NULL)
   1210       1.1  riastrad 		return error;
   1211      1.13  riastrad 	if (arg)
   1212      1.13  riastrad 		entropy_consolidate();
   1213       1.1  riastrad 
   1214       1.1  riastrad 	return error;
   1215       1.1  riastrad }
   1216       1.1  riastrad 
   1217       1.1  riastrad /*
   1218      1.10  riastrad  * sysctl -w kern.entropy.gather=1
   1219      1.10  riastrad  *
   1220      1.10  riastrad  *	Trigger gathering entropy from all on-demand sources, and wait
   1221      1.10  riastrad  *	for synchronous sources (but not asynchronous sources) to
   1222      1.10  riastrad  *	complete.  Writable only by superuser.
   1223      1.10  riastrad  */
   1224      1.10  riastrad static int
   1225      1.10  riastrad sysctl_entropy_gather(SYSCTLFN_ARGS)
   1226      1.10  riastrad {
   1227      1.10  riastrad 	struct sysctlnode node = *rnode;
   1228      1.10  riastrad 	int arg;
   1229      1.10  riastrad 	int error;
   1230      1.10  riastrad 
   1231      1.10  riastrad 	KASSERT(E->stage == ENTROPY_HOT);
   1232      1.10  riastrad 
   1233      1.10  riastrad 	node.sysctl_data = &arg;
   1234      1.10  riastrad 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   1235      1.10  riastrad 	if (error || newp == NULL)
   1236      1.10  riastrad 		return error;
   1237      1.10  riastrad 	if (arg) {
   1238      1.10  riastrad 		mutex_enter(&E->lock);
   1239      1.10  riastrad 		entropy_request(ENTROPY_CAPACITY);
   1240      1.10  riastrad 		mutex_exit(&E->lock);
   1241      1.10  riastrad 	}
   1242      1.10  riastrad 
   1243      1.10  riastrad 	return 0;
   1244      1.10  riastrad }
   1245      1.10  riastrad 
   1246      1.10  riastrad /*
   1247       1.1  riastrad  * entropy_extract(buf, len, flags)
   1248       1.1  riastrad  *
   1249       1.1  riastrad  *	Extract len bytes from the global entropy pool into buf.
   1250       1.1  riastrad  *
   1251       1.1  riastrad  *	Flags may have:
   1252       1.1  riastrad  *
   1253       1.1  riastrad  *		ENTROPY_WAIT	Wait for entropy if not available yet.
   1254       1.1  riastrad  *		ENTROPY_SIG	Allow interruption by a signal during wait.
   1255      1.23  riastrad  *		ENTROPY_HARDFAIL Either fill the buffer with full entropy,
   1256      1.23  riastrad  *				or fail without filling it at all.
   1257       1.1  riastrad  *
   1258       1.1  riastrad  *	Return zero on success, or error on failure:
   1259       1.1  riastrad  *
   1260       1.1  riastrad  *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
   1261       1.1  riastrad  *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
   1262       1.1  riastrad  *
   1263       1.1  riastrad  *	If ENTROPY_WAIT is set, allowed only in thread context.  If
   1264       1.1  riastrad  *	ENTROPY_WAIT is not set, allowed up to IPL_VM.  (XXX That's
   1265       1.1  riastrad  *	awfully high...  Do we really need it in hard interrupts?  This
   1266       1.1  riastrad  *	arises from use of cprng_strong(9).)
   1267       1.1  riastrad  */
   1268       1.1  riastrad int
   1269       1.1  riastrad entropy_extract(void *buf, size_t len, int flags)
   1270       1.1  riastrad {
   1271       1.1  riastrad 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
   1272       1.1  riastrad 	static struct timeval lasttime; /* serialized by E->lock */
   1273       1.1  riastrad 	int error;
   1274       1.1  riastrad 
   1275       1.1  riastrad 	if (ISSET(flags, ENTROPY_WAIT)) {
   1276       1.1  riastrad 		ASSERT_SLEEPABLE();
   1277       1.1  riastrad 		KASSERTMSG(E->stage >= ENTROPY_WARM,
   1278       1.1  riastrad 		    "can't wait for entropy until warm");
   1279       1.1  riastrad 	}
   1280       1.1  riastrad 
   1281       1.1  riastrad 	/* Acquire the global lock to get at the global pool.  */
   1282       1.1  riastrad 	if (E->stage >= ENTROPY_WARM)
   1283       1.1  riastrad 		mutex_enter(&E->lock);
   1284       1.1  riastrad 
   1285       1.1  riastrad 	/* Count up request for entropy in interrupt context.  */
   1286      1.16  riastrad 	if (cpu_intr_p())
   1287       1.1  riastrad 		entropy_extract_intr_evcnt.ev_count++;
   1288       1.1  riastrad 
   1289       1.1  riastrad 	/* Wait until there is enough entropy in the system.  */
   1290       1.1  riastrad 	error = 0;
   1291       1.1  riastrad 	while (E->needed) {
   1292       1.1  riastrad 		/* Ask for more, synchronously if possible.  */
   1293       1.1  riastrad 		entropy_request(len);
   1294       1.1  riastrad 
   1295       1.1  riastrad 		/* If we got enough, we're done.  */
   1296       1.1  riastrad 		if (E->needed == 0) {
   1297       1.1  riastrad 			KASSERT(error == 0);
   1298       1.1  riastrad 			break;
   1299       1.1  riastrad 		}
   1300       1.1  riastrad 
   1301       1.1  riastrad 		/* If not waiting, stop here.  */
   1302       1.1  riastrad 		if (!ISSET(flags, ENTROPY_WAIT)) {
   1303       1.1  riastrad 			error = EWOULDBLOCK;
   1304       1.1  riastrad 			break;
   1305       1.1  riastrad 		}
   1306       1.1  riastrad 
   1307       1.1  riastrad 		/* Wait for some entropy to come in and try again.  */
   1308       1.1  riastrad 		KASSERT(E->stage >= ENTROPY_WARM);
   1309      1.24      gson 		printf("entropy: pid %d (%s) blocking due to lack of entropy\n",
   1310      1.24      gson 		       curproc->p_pid, curproc->p_comm);
   1311      1.24      gson 
   1312       1.1  riastrad 		if (ISSET(flags, ENTROPY_SIG)) {
   1313       1.1  riastrad 			error = cv_wait_sig(&E->cv, &E->lock);
   1314       1.1  riastrad 			if (error)
   1315       1.1  riastrad 				break;
   1316       1.1  riastrad 		} else {
   1317       1.1  riastrad 			cv_wait(&E->cv, &E->lock);
   1318       1.1  riastrad 		}
   1319       1.1  riastrad 	}
   1320       1.1  riastrad 
   1321      1.23  riastrad 	/*
   1322      1.23  riastrad 	 * Count failure -- but fill the buffer nevertheless, unless
   1323      1.23  riastrad 	 * the caller specified ENTROPY_HARDFAIL.
   1324      1.23  riastrad 	 */
   1325      1.23  riastrad 	if (error) {
   1326      1.23  riastrad 		if (ISSET(flags, ENTROPY_HARDFAIL))
   1327      1.23  riastrad 			goto out;
   1328       1.1  riastrad 		entropy_extract_fail_evcnt.ev_count++;
   1329      1.23  riastrad 	}
   1330       1.1  riastrad 
   1331       1.1  riastrad 	/*
   1332       1.1  riastrad 	 * Report a warning if we have never yet reached full entropy.
   1333       1.1  riastrad 	 * This is the only case where we consider entropy to be
   1334       1.1  riastrad 	 * `depleted' without kern.entropy.depletion enabled -- when we
   1335       1.1  riastrad 	 * only have partial entropy, an adversary may be able to
   1336       1.1  riastrad 	 * narrow the state of the pool down to a small number of
   1337       1.1  riastrad 	 * possibilities; the output then enables them to confirm a
   1338       1.1  riastrad 	 * guess, reducing its entropy from the adversary's perspective
   1339       1.1  riastrad 	 * to zero.
   1340       1.1  riastrad 	 */
   1341       1.1  riastrad 	if (__predict_false(E->epoch == (unsigned)-1)) {
   1342       1.1  riastrad 		if (ratecheck(&lasttime, &interval))
   1343       1.1  riastrad 			printf("entropy: WARNING:"
   1344       1.1  riastrad 			    " extracting entropy too early\n");
   1345       1.1  riastrad 		atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY);
   1346       1.1  riastrad 	}
   1347       1.1  riastrad 
   1348       1.1  riastrad 	/* Extract data from the pool, and `deplete' if we're doing that.  */
   1349       1.1  riastrad 	entpool_extract(&E->pool, buf, len);
   1350       1.1  riastrad 	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
   1351       1.1  riastrad 	    error == 0) {
   1352       1.1  riastrad 		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
   1353       1.1  riastrad 
   1354       1.1  riastrad 		atomic_store_relaxed(&E->needed,
   1355       1.1  riastrad 		    E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost));
   1356       1.1  riastrad 		entropy_deplete_evcnt.ev_count++;
   1357       1.1  riastrad 	}
   1358       1.1  riastrad 
   1359      1.23  riastrad out:	/* Release the global lock and return the error.  */
   1360       1.1  riastrad 	if (E->stage >= ENTROPY_WARM)
   1361       1.1  riastrad 		mutex_exit(&E->lock);
   1362       1.1  riastrad 	return error;
   1363       1.1  riastrad }
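
/*
 * Example (editor's illustrative sketch, not part of the subsystem):
 * a typical in-kernel caller generating key material from thread
 * context, waiting for full entropy and allowing signals to interrupt
 * the wait.  The identifier example_key is hypothetical.
 *
 *	uint8_t example_key[32];
 *	int error;
 *
 *	error = entropy_extract(example_key, sizeof example_key,
 *	    ENTROPY_WAIT|ENTROPY_SIG);
 *	if (error)		(* EINTR/ERESTART if interrupted *)
 *		return error;
 *	...use example_key, then zap it...
 *	explicit_memset(example_key, 0, sizeof example_key);
 */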
   1364       1.1  riastrad 
   1365       1.1  riastrad /*
   1366       1.1  riastrad  * entropy_poll(events)
   1367       1.1  riastrad  *
   1368       1.1  riastrad  *	Return the subset of events ready, and if it is not all of
   1369       1.1  riastrad  *	events, record curlwp as waiting for entropy.
   1370       1.1  riastrad  */
   1371       1.1  riastrad int
   1372       1.1  riastrad entropy_poll(int events)
   1373       1.1  riastrad {
   1374       1.1  riastrad 	int revents = 0;
   1375       1.1  riastrad 
   1376       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   1377       1.1  riastrad 
   1378       1.1  riastrad 	/* Always ready for writing.  */
   1379       1.1  riastrad 	revents |= events & (POLLOUT|POLLWRNORM);
   1380       1.1  riastrad 
   1381       1.1  riastrad 	/* Narrow it down to reads.  */
   1382       1.1  riastrad 	events &= POLLIN|POLLRDNORM;
   1383       1.1  riastrad 	if (events == 0)
   1384       1.1  riastrad 		return revents;
   1385       1.1  riastrad 
   1386       1.1  riastrad 	/*
   1387       1.1  riastrad 	 * If we have reached full entropy and we're not depleting
   1388       1.1  riastrad 	 * entropy, we are forever ready.
   1389       1.1  riastrad 	 */
   1390       1.1  riastrad 	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
   1391       1.1  riastrad 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
   1392       1.1  riastrad 		return revents | events;
   1393       1.1  riastrad 
   1394       1.1  riastrad 	/*
   1395       1.1  riastrad 	 * Otherwise, check whether we need entropy under the lock.  If
   1396       1.1  riastrad 	 * we don't, we're ready; if we do, add ourselves to the queue.
   1397       1.1  riastrad 	 */
   1398       1.1  riastrad 	mutex_enter(&E->lock);
   1399       1.1  riastrad 	if (E->needed == 0)
   1400       1.1  riastrad 		revents |= events;
   1401       1.1  riastrad 	else
   1402       1.1  riastrad 		selrecord(curlwp, &E->selq);
   1403       1.1  riastrad 	mutex_exit(&E->lock);
   1404       1.1  riastrad 
   1405       1.1  riastrad 	return revents;
   1406       1.1  riastrad }
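
/*
 * Example (editor's illustrative sketch): the userland counterpart,
 * waiting for /dev/random to become readable before reading.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, INFTIM) == 1 && (pfd.revents & POLLIN) != 0)
 *		n = read(fd, buf, sizeof buf);
 *
 * Once full entropy is reached and kern.entropy.depletion is off, the
 * POLLIN case holds forever, per the fast path above.
 */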
   1407       1.1  riastrad 
   1408       1.1  riastrad /*
   1409       1.1  riastrad  * filt_entropy_read_detach(kn)
   1410       1.1  riastrad  *
   1411       1.1  riastrad  *	struct filterops::f_detach callback for entropy read events:
   1412       1.1  riastrad  *	remove kn from the list of waiters.
   1413       1.1  riastrad  */
   1414       1.1  riastrad static void
   1415       1.1  riastrad filt_entropy_read_detach(struct knote *kn)
   1416       1.1  riastrad {
   1417       1.1  riastrad 
   1418       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   1419       1.1  riastrad 
   1420       1.1  riastrad 	mutex_enter(&E->lock);
   1421  1.24.2.1   thorpej 	selremove_knote(&E->selq, kn);
   1422       1.1  riastrad 	mutex_exit(&E->lock);
   1423       1.1  riastrad }
   1424       1.1  riastrad 
   1425       1.1  riastrad /*
   1426       1.1  riastrad  * filt_entropy_read_event(kn, hint)
   1427       1.1  riastrad  *
   1428       1.1  riastrad  *	struct filterops::f_event callback for entropy read events:
   1429       1.1  riastrad  *	poll for entropy.  Caller must hold the global entropy lock if
   1430       1.1  riastrad  *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
   1431       1.1  riastrad  */
   1432       1.1  riastrad static int
   1433       1.1  riastrad filt_entropy_read_event(struct knote *kn, long hint)
   1434       1.1  riastrad {
   1435       1.1  riastrad 	int ret;
   1436       1.1  riastrad 
   1437       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   1438       1.1  riastrad 
   1439       1.1  riastrad 	/* Acquire the lock, if caller is outside entropy subsystem.  */
   1440       1.1  riastrad 	if (hint == NOTE_SUBMIT)
   1441       1.1  riastrad 		KASSERT(mutex_owned(&E->lock));
   1442       1.1  riastrad 	else
   1443       1.1  riastrad 		mutex_enter(&E->lock);
   1444       1.1  riastrad 
   1445       1.1  riastrad 	/*
   1446       1.1  riastrad 	 * If we still need entropy, can't read anything; if not, can
   1447       1.1  riastrad 	 * read arbitrarily much.
   1448       1.1  riastrad 	 */
   1449       1.1  riastrad 	if (E->needed != 0) {
   1450       1.1  riastrad 		ret = 0;
   1451       1.1  riastrad 	} else {
   1452       1.1  riastrad 		if (atomic_load_relaxed(&entropy_depletion))
   1453       1.1  riastrad 			kn->kn_data = ENTROPY_CAPACITY*NBBY;
   1454       1.1  riastrad 		else
   1455       1.1  riastrad 			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
   1456       1.1  riastrad 		ret = 1;
   1457       1.1  riastrad 	}
   1458       1.1  riastrad 
   1459       1.1  riastrad 	/* Release the lock, if caller is outside entropy subsystem.  */
   1460       1.1  riastrad 	if (hint == NOTE_SUBMIT)
   1461       1.1  riastrad 		KASSERT(mutex_owned(&E->lock));
   1462       1.1  riastrad 	else
   1463       1.1  riastrad 		mutex_exit(&E->lock);
   1464       1.1  riastrad 
   1465       1.1  riastrad 	return ret;
   1466       1.1  riastrad }
   1467       1.1  riastrad 
   1468       1.1  riastrad static const struct filterops entropy_read_filtops = {
   1469       1.1  riastrad 	.f_isfd = 1,		/* XXX Makes sense only for /dev/u?random.  */
   1470       1.1  riastrad 	.f_attach = NULL,
   1471       1.1  riastrad 	.f_detach = filt_entropy_read_detach,
   1472       1.1  riastrad 	.f_event = filt_entropy_read_event,
   1473       1.1  riastrad };
   1474       1.1  riastrad 
   1475       1.1  riastrad /*
   1476       1.1  riastrad  * entropy_kqfilter(kn)
   1477       1.1  riastrad  *
   1478       1.1  riastrad  *	Register kn to receive entropy event notifications.  May be
   1479       1.1  riastrad  *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
   1480       1.1  riastrad  */
   1481       1.1  riastrad int
   1482       1.1  riastrad entropy_kqfilter(struct knote *kn)
   1483       1.1  riastrad {
   1484       1.1  riastrad 
   1485       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   1486       1.1  riastrad 
   1487       1.1  riastrad 	switch (kn->kn_filter) {
   1488       1.1  riastrad 	case EVFILT_READ:
   1489       1.1  riastrad 		/* Enter into the global select queue.  */
   1490       1.1  riastrad 		mutex_enter(&E->lock);
   1491       1.1  riastrad 		kn->kn_fop = &entropy_read_filtops;
   1492  1.24.2.1   thorpej 		selrecord_knote(&E->selq, kn);
   1493       1.1  riastrad 		mutex_exit(&E->lock);
   1494       1.1  riastrad 		return 0;
   1495       1.1  riastrad 	case EVFILT_WRITE:
   1496       1.1  riastrad 		/* Can always dump entropy into the system.  */
   1497       1.1  riastrad 		kn->kn_fop = &seltrue_filtops;
   1498       1.1  riastrad 		return 0;
   1499       1.1  riastrad 	default:
   1500       1.1  riastrad 		return EINVAL;
   1501       1.1  riastrad 	}
   1502       1.1  riastrad }
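
/*
 * Example (editor's illustrative sketch): the equivalent registration
 * from userland with kqueue(2) on a /dev/random descriptor.  The
 * EVFILT_READ event fires once the system reaches full entropy.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */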
   1503       1.1  riastrad 
   1504       1.1  riastrad /*
   1505       1.1  riastrad  * rndsource_setcb(rs, get, getarg)
   1506       1.1  riastrad  *
   1507       1.1  riastrad  *	Set the request callback for the entropy source rs, if it can
   1508       1.1  riastrad  *	provide entropy on demand.  Must precede rnd_attach_source.
   1509       1.1  riastrad  */
   1510       1.1  riastrad void
   1511       1.1  riastrad rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
   1512       1.1  riastrad     void *getarg)
   1513       1.1  riastrad {
   1514       1.1  riastrad 
   1515       1.1  riastrad 	rs->get = get;
   1516       1.1  riastrad 	rs->getarg = getarg;
   1517       1.1  riastrad }
   1518       1.1  riastrad 
   1519       1.1  riastrad /*
   1520       1.1  riastrad  * rnd_attach_source(rs, name, type, flags)
   1521       1.1  riastrad  *
   1522       1.1  riastrad  *	Attach the entropy source rs.  Must be done after
   1523       1.1  riastrad  *	rndsource_setcb, if any, and before any calls to rnd_add_data.
   1524       1.1  riastrad  */
   1525       1.1  riastrad void
   1526       1.1  riastrad rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
   1527       1.1  riastrad     uint32_t flags)
   1528       1.1  riastrad {
   1529       1.1  riastrad 	uint32_t extra[4];
   1530       1.1  riastrad 	unsigned i = 0;
   1531       1.1  riastrad 
   1532       1.1  riastrad 	/* Grab cycle counter to mix extra into the pool.  */
   1533       1.1  riastrad 	extra[i++] = entropy_timer();
   1534       1.1  riastrad 
   1535       1.1  riastrad 	/*
   1536       1.1  riastrad 	 * Apply some standard flags:
   1537       1.1  riastrad 	 *
   1538       1.1  riastrad 	 * - We do not bother with network devices by default, for
   1539       1.1  riastrad 	 *   hysterical raisins (perhaps: because it is often the case
   1540       1.1  riastrad 	 *   that an adversary can influence network packet timings).
   1541       1.1  riastrad 	 */
   1542       1.1  riastrad 	switch (type) {
   1543       1.1  riastrad 	case RND_TYPE_NET:
   1544       1.1  riastrad 		flags |= RND_FLAG_NO_COLLECT;
   1545       1.1  riastrad 		break;
   1546       1.1  riastrad 	}
   1547       1.1  riastrad 
   1548       1.1  riastrad 	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
   1549       1.1  riastrad 	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
   1550       1.1  riastrad 
   1551       1.1  riastrad 	/* Initialize the random source.  */
   1552       1.1  riastrad 	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
   1553       1.1  riastrad 	strlcpy(rs->name, name, sizeof(rs->name));
   1554  1.24.2.2   thorpej 	memset(&rs->time_delta, 0, sizeof(rs->time_delta));
   1555  1.24.2.2   thorpej 	memset(&rs->value_delta, 0, sizeof(rs->value_delta));
   1556       1.9  riastrad 	rs->total = 0;
   1557       1.1  riastrad 	rs->type = type;
   1558       1.1  riastrad 	rs->flags = flags;
   1559       1.1  riastrad 	if (E->stage >= ENTROPY_WARM)
   1560       1.1  riastrad 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
   1561       1.1  riastrad 	extra[i++] = entropy_timer();
   1562       1.1  riastrad 
   1563       1.1  riastrad 	/* Wire it into the global list of random sources.  */
   1564       1.1  riastrad 	if (E->stage >= ENTROPY_WARM)
   1565       1.1  riastrad 		mutex_enter(&E->lock);
   1566       1.1  riastrad 	LIST_INSERT_HEAD(&E->sources, rs, list);
   1567       1.1  riastrad 	if (E->stage >= ENTROPY_WARM)
   1568       1.1  riastrad 		mutex_exit(&E->lock);
   1569       1.1  riastrad 	extra[i++] = entropy_timer();
   1570       1.1  riastrad 
   1571       1.1  riastrad 	/* Request that it provide entropy ASAP, if we can.  */
   1572       1.1  riastrad 	if (ISSET(flags, RND_FLAG_HASCB))
   1573       1.1  riastrad 		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
   1574       1.1  riastrad 	extra[i++] = entropy_timer();
   1575       1.1  riastrad 
   1576       1.1  riastrad 	/* Mix the extra into the pool.  */
   1577       1.1  riastrad 	KASSERT(i == __arraycount(extra));
   1578       1.1  riastrad 	entropy_enter(extra, sizeof extra, 0);
   1579       1.1  riastrad 	explicit_memset(extra, 0, sizeof extra);
   1580       1.1  riastrad }
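
/*
 * Example (editor's illustrative sketch): a hypothetical driver with
 * an on-demand hardware RNG.  mydev_softc, mydev_read_word, and the
 * "mydev" name are assumptions for illustration; the required order
 * -- rndsource_setcb first, then rnd_attach_source -- is per the
 * comments above.
 *
 *	static void
 *	mydev_get(size_t nbytes, void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *		uint32_t v;
 *
 *		while (nbytes) {
 *			v = mydev_read_word(sc);
 *			rnd_add_data(&sc->sc_rndsource, &v, sizeof v,
 *			    NBBY*sizeof v);	(* claim full entropy *)
 *			nbytes -= MIN(nbytes, sizeof v);
 *		}
 *	}
 *
 *	rndsource_setcb(&sc->sc_rndsource, mydev_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, "mydev", RND_TYPE_RNG,
 *	    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
 */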
   1581       1.1  riastrad 
   1582       1.1  riastrad /*
   1583       1.1  riastrad  * rnd_detach_source(rs)
   1584       1.1  riastrad  *
   1585       1.1  riastrad  *	Detach the entropy source rs.  May sleep waiting for users to
   1586       1.1  riastrad  *	drain.  Further use is not allowed.
   1587       1.1  riastrad  */
   1588       1.1  riastrad void
   1589       1.1  riastrad rnd_detach_source(struct krndsource *rs)
   1590       1.1  riastrad {
   1591       1.1  riastrad 
   1592       1.1  riastrad 	/*
   1593       1.1  riastrad 	 * If we're cold (shouldn't happen, but hey), just remove it
   1594       1.1  riastrad 	 * from the list -- there's nothing allocated.
   1595       1.1  riastrad 	 */
   1596       1.1  riastrad 	if (E->stage == ENTROPY_COLD) {
   1597       1.1  riastrad 		LIST_REMOVE(rs, list);
   1598       1.1  riastrad 		return;
   1599       1.1  riastrad 	}
   1600       1.1  riastrad 
   1601       1.1  riastrad 	/* We may have to wait for entropy_request.  */
   1602       1.1  riastrad 	ASSERT_SLEEPABLE();
   1603       1.1  riastrad 
   1604       1.4  riastrad 	/* Wait until the source list is not in use, and remove it.  */
   1605       1.1  riastrad 	mutex_enter(&E->lock);
   1606       1.4  riastrad 	while (E->sourcelock)
   1607  1.24.2.2   thorpej 		cv_wait(&E->sourcelock_cv, &E->lock);
   1608       1.1  riastrad 	LIST_REMOVE(rs, list);
   1609       1.1  riastrad 	mutex_exit(&E->lock);
   1610       1.1  riastrad 
   1611       1.1  riastrad 	/* Free the per-CPU data.  */
   1612       1.1  riastrad 	percpu_free(rs->state, sizeof(struct rndsource_cpu));
   1613       1.1  riastrad }
   1614       1.1  riastrad 
   1615       1.1  riastrad /*
   1616       1.4  riastrad  * rnd_lock_sources()
   1617       1.4  riastrad  *
   1618       1.4  riastrad  *	Prevent changes to the list of rndsources while we iterate it.
   1619       1.4  riastrad  *	Interruptible.  Caller must hold the global entropy lock.  If
   1620       1.4  riastrad  *	successful, no rndsource will go away until rnd_unlock_sources
   1621       1.4  riastrad  *	even while the caller releases the global entropy lock.
   1622       1.4  riastrad  */
   1623       1.4  riastrad static int
   1624       1.4  riastrad rnd_lock_sources(void)
   1625       1.4  riastrad {
   1626       1.4  riastrad 	int error;
   1627       1.4  riastrad 
   1628       1.4  riastrad 	KASSERT(mutex_owned(&E->lock));
   1629       1.4  riastrad 
   1630       1.4  riastrad 	while (E->sourcelock) {
   1631  1.24.2.2   thorpej 		error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
   1632       1.4  riastrad 		if (error)
   1633       1.4  riastrad 			return error;
   1634       1.4  riastrad 	}
   1635       1.4  riastrad 
   1636       1.4  riastrad 	E->sourcelock = curlwp;
   1637       1.4  riastrad 	return 0;
   1638       1.4  riastrad }
   1639       1.4  riastrad 
   1640       1.4  riastrad /*
   1641       1.4  riastrad  * rnd_trylock_sources()
   1642       1.4  riastrad  *
   1643       1.4  riastrad  *	Try to lock the list of sources, but if it's already locked,
   1644       1.4  riastrad  *	fail.  Caller must hold the global entropy lock.  If
   1645       1.4  riastrad  *	successful, no rndsource will go away until rnd_unlock_sources
   1646       1.4  riastrad  *	even while the caller releases the global entropy lock.
   1647       1.4  riastrad  */
   1648       1.4  riastrad static bool
   1649       1.4  riastrad rnd_trylock_sources(void)
   1650       1.4  riastrad {
   1651       1.4  riastrad 
   1652       1.4  riastrad 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1653       1.4  riastrad 
   1654       1.4  riastrad 	if (E->sourcelock)
   1655       1.4  riastrad 		return false;
   1656      1.16  riastrad 	E->sourcelock = curlwp;
   1657       1.4  riastrad 	return true;
   1658       1.4  riastrad }
   1659       1.4  riastrad 
   1660       1.4  riastrad /*
   1661       1.4  riastrad  * rnd_unlock_sources()
   1662       1.4  riastrad  *
   1663       1.4  riastrad  *	Unlock the list of sources after rnd_lock_sources or
   1664       1.4  riastrad  *	rnd_trylock_sources.  Caller must hold the global entropy lock.
   1665       1.4  riastrad  */
   1666       1.4  riastrad static void
   1667       1.4  riastrad rnd_unlock_sources(void)
   1668       1.4  riastrad {
   1669       1.4  riastrad 
   1670       1.4  riastrad 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1671       1.4  riastrad 
   1672      1.16  riastrad 	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
   1673      1.16  riastrad 	    curlwp, E->sourcelock);
   1674       1.4  riastrad 	E->sourcelock = NULL;
   1675       1.4  riastrad 	if (E->stage >= ENTROPY_WARM)
   1676  1.24.2.2   thorpej 		cv_signal(&E->sourcelock_cv);
   1677       1.4  riastrad }
   1678       1.4  riastrad 
   1679       1.4  riastrad /*
   1680       1.4  riastrad  * rnd_sources_locked()
   1681       1.4  riastrad  *
   1682       1.4  riastrad  *	True if we hold the list of rndsources locked, for diagnostic
   1683       1.4  riastrad  *	assertions.
   1684       1.4  riastrad  */
   1685       1.7  riastrad static bool __diagused
   1686       1.4  riastrad rnd_sources_locked(void)
   1687       1.4  riastrad {
   1688       1.4  riastrad 
   1689      1.16  riastrad 	return E->sourcelock == curlwp;
   1690       1.4  riastrad }
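
/*
 * Example (editor's illustrative sketch): the intended pattern for
 * walking the source list without pinning the global lock across
 * potentially slow per-source work, as entropy_request does below.
 * Because the caller holds the source lock, no rndsource can be
 * detached while the global lock is dropped.
 *
 *	mutex_enter(&E->lock);
 *	if (rnd_lock_sources() == 0) {		(* interruptible *)
 *		LIST_FOREACH(rs, &E->sources, list) {
 *			mutex_exit(&E->lock);
 *			...slow per-source work; rs cannot go away...
 *			mutex_enter(&E->lock);
 *		}
 *		rnd_unlock_sources();
 *	}
 *	mutex_exit(&E->lock);
 */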
   1691       1.4  riastrad 
   1692       1.4  riastrad /*
   1693       1.1  riastrad  * entropy_request(nbytes)
   1694       1.1  riastrad  *
   1695       1.1  riastrad  *	Request nbytes bytes of entropy from all sources in the system.
   1696       1.1  riastrad  *	OK if we overdo it.  Caller must hold the global entropy lock;
   1697       1.1  riastrad  *	will release and re-acquire it.
   1698       1.1  riastrad  */
   1699       1.1  riastrad static void
   1700       1.1  riastrad entropy_request(size_t nbytes)
   1701       1.1  riastrad {
   1702       1.4  riastrad 	struct krndsource *rs;
   1703       1.1  riastrad 
   1704       1.1  riastrad 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
   1705       1.1  riastrad 
   1706       1.1  riastrad 	/*
   1707       1.1  riastrad 	 * If there is a request in progress, let it proceed.
   1708       1.1  riastrad 	 * Otherwise, note that a request is in progress to avoid
   1709       1.1  riastrad 	 * reentry and to block rnd_detach_source until we're done.
   1710       1.1  riastrad 	 */
   1711       1.4  riastrad 	if (!rnd_trylock_sources())
   1712       1.1  riastrad 		return;
   1713       1.1  riastrad 	entropy_request_evcnt.ev_count++;
   1714       1.1  riastrad 
   1715       1.1  riastrad 	/* Clamp to the maximum reasonable request.  */
   1716       1.1  riastrad 	nbytes = MIN(nbytes, ENTROPY_CAPACITY);
   1717       1.1  riastrad 
   1718       1.1  riastrad 	/* Walk the list of sources.  */
   1719       1.4  riastrad 	LIST_FOREACH(rs, &E->sources, list) {
   1720       1.1  riastrad 		/* Skip sources without callbacks.  */
   1721       1.1  riastrad 		if (!ISSET(rs->flags, RND_FLAG_HASCB))
   1722       1.1  riastrad 			continue;
   1723       1.1  riastrad 
   1724      1.22  riastrad 		/*
   1725      1.22  riastrad 		 * Skip sources that are disabled altogether -- we
   1726      1.22  riastrad 		 * would just ignore their samples anyway.
   1727      1.22  riastrad 		 */
   1728      1.22  riastrad 		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
   1729      1.22  riastrad 			continue;
   1730      1.22  riastrad 
   1731       1.1  riastrad 		/* Drop the lock while we call the callback.  */
   1732       1.1  riastrad 		if (E->stage >= ENTROPY_WARM)
   1733       1.1  riastrad 			mutex_exit(&E->lock);
   1734       1.1  riastrad 		(*rs->get)(nbytes, rs->getarg);
   1735       1.1  riastrad 		if (E->stage >= ENTROPY_WARM)
   1736       1.1  riastrad 			mutex_enter(&E->lock);
   1737       1.1  riastrad 	}
   1738       1.1  riastrad 
   1739       1.1  riastrad 	/* Notify rnd_detach_source that the request is done.  */
   1740       1.4  riastrad 	rnd_unlock_sources();
   1741       1.1  riastrad }
   1742       1.1  riastrad 
   1743       1.1  riastrad /*
   1744       1.1  riastrad  * rnd_add_uint32(rs, value)
   1745       1.1  riastrad  *
   1746       1.1  riastrad  *	Enter 32 bits of data from an entropy source into the pool.
   1747       1.1  riastrad  *
   1748       1.1  riastrad  *	If rs is NULL, may not be called from interrupt context.
   1749       1.1  riastrad  *
   1750       1.1  riastrad  *	If rs is non-NULL, may be called from any context.  May drop
   1751       1.1  riastrad  *	data if called from interrupt context.
   1752       1.1  riastrad  */
   1753       1.1  riastrad void
   1754       1.1  riastrad rnd_add_uint32(struct krndsource *rs, uint32_t value)
   1755       1.1  riastrad {
   1756       1.1  riastrad 
   1757       1.1  riastrad 	rnd_add_data(rs, &value, sizeof value, 0);
   1758       1.1  riastrad }
   1759       1.1  riastrad 
   1760       1.1  riastrad void
   1761       1.1  riastrad _rnd_add_uint32(struct krndsource *rs, uint32_t value)
   1762       1.1  riastrad {
   1763       1.1  riastrad 
   1764       1.1  riastrad 	rnd_add_data(rs, &value, sizeof value, 0);
   1765       1.1  riastrad }
   1766       1.1  riastrad 
   1767       1.1  riastrad void
   1768       1.1  riastrad _rnd_add_uint64(struct krndsource *rs, uint64_t value)
   1769       1.1  riastrad {
   1770       1.1  riastrad 
   1771       1.1  riastrad 	rnd_add_data(rs, &value, sizeof value, 0);
   1772       1.1  riastrad }
   1773       1.1  riastrad 
   1774       1.1  riastrad /*
   1775       1.1  riastrad  * rnd_add_data(rs, buf, len, entropybits)
   1776       1.1  riastrad  *
   1777       1.1  riastrad  *	Enter data from an entropy source into the pool, with a
   1778       1.1  riastrad  *	driver's estimate of how much entropy the physical source of
   1779       1.1  riastrad  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
   1780       1.1  riastrad  *	estimate and treat it as zero.
   1781       1.1  riastrad  *
   1782       1.1  riastrad  *	If rs is NULL, may not be called from interrupt context.
   1783       1.1  riastrad  *
   1784       1.1  riastrad  *	If rs is non-NULL, may be called from any context.  May drop
   1785       1.1  riastrad  *	data if called from interrupt context.
   1786       1.1  riastrad  */
   1787       1.1  riastrad void
   1788       1.1  riastrad rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
   1789       1.1  riastrad     uint32_t entropybits)
   1790       1.1  riastrad {
   1791       1.1  riastrad 	uint32_t extra;
   1792       1.1  riastrad 	uint32_t flags;
   1793       1.1  riastrad 
   1794       1.1  riastrad 	KASSERTMSG(howmany(entropybits, NBBY) <= len,
   1795       1.1  riastrad 	    "%s: impossible entropy rate:"
   1796       1.1  riastrad 	    " %"PRIu32" bits in %"PRIu32"-byte string",
   1797       1.1  riastrad 	    rs ? rs->name : "(anonymous)", entropybits, len);
   1798       1.1  riastrad 
   1799       1.1  riastrad 	/* If there's no rndsource, just enter the data and time now.  */
   1800       1.1  riastrad 	if (rs == NULL) {
   1801       1.1  riastrad 		entropy_enter(buf, len, entropybits);
   1802       1.1  riastrad 		extra = entropy_timer();
   1803       1.1  riastrad 		entropy_enter(&extra, sizeof extra, 0);
   1804       1.1  riastrad 		explicit_memset(&extra, 0, sizeof extra);
   1805       1.1  riastrad 		return;
   1806       1.1  riastrad 	}
   1807       1.1  riastrad 
   1808       1.1  riastrad 	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
   1809       1.1  riastrad 	flags = atomic_load_relaxed(&rs->flags);
   1810       1.1  riastrad 
   1811       1.1  riastrad 	/*
   1812       1.1  riastrad 	 * Skip if:
   1813       1.1  riastrad 	 * - we're not collecting entropy, or
   1814       1.1  riastrad 	 * - the operator doesn't want to collect entropy from this, or
   1815       1.1  riastrad 	 * - neither data nor timings are being collected from this.
   1816       1.1  riastrad 	 */
   1817       1.1  riastrad 	if (!atomic_load_relaxed(&entropy_collection) ||
   1818       1.1  riastrad 	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
   1819       1.1  riastrad 	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
   1820       1.1  riastrad 		return;
   1821       1.1  riastrad 
   1822       1.1  riastrad 	/* If asked, ignore the estimate.  */
   1823       1.1  riastrad 	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
   1824       1.1  riastrad 		entropybits = 0;
   1825       1.1  riastrad 
   1826       1.1  riastrad 	/* If we are collecting data, enter them.  */
   1827       1.1  riastrad 	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
   1828  1.24.2.2   thorpej 		rnd_add_data_1(rs, buf, len, entropybits,
   1829  1.24.2.2   thorpej 		    RND_FLAG_COLLECT_VALUE);
   1830       1.1  riastrad 
   1831       1.1  riastrad 	/* If we are collecting timings, enter one.  */
   1832       1.1  riastrad 	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
   1833       1.1  riastrad 		extra = entropy_timer();
   1834  1.24.2.2   thorpej 		rnd_add_data_1(rs, &extra, sizeof extra, 0,
   1835  1.24.2.2   thorpej 		    RND_FLAG_COLLECT_TIME);
   1836       1.1  riastrad 	}
   1837       1.1  riastrad }
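
/*
 * Example (editor's illustrative sketch): an interrupt handler
 * feeding a device register whose unpredictability the driver cannot
 * quantify, so it claims zero bits and relies on timing collection.
 * mydev_intr and mydev_read_status are hypothetical.
 *
 *	static int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *		uint32_t status = mydev_read_status(sc);
 *
 *		(* rs non-NULL, so this is safe from interrupt
 *		   context; samples may be dropped under load, per
 *		   the comment above *)
 *		rnd_add_data(&sc->sc_rndsource, &status, sizeof status,
 *		    0);
 *		...
 *		return 1;
 *	}
 */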
   1838       1.1  riastrad 
   1839  1.24.2.2   thorpej static unsigned
   1840  1.24.2.2   thorpej add_sat(unsigned a, unsigned b)
   1841  1.24.2.2   thorpej {
   1842  1.24.2.2   thorpej 	unsigned c = a + b;
   1843  1.24.2.2   thorpej 
   1844  1.24.2.2   thorpej 	return (c < a ? UINT_MAX : c);
   1845  1.24.2.2   thorpej }
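
/*
 * Worked example (editor's note): add_sat(UINT_MAX - 1, 4) wraps to
 * 2, so c < a and the result pins at UINT_MAX -- sample and bit
 * counters saturate at the top of the range instead of wrapping
 * around to small values.
 */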
   1846  1.24.2.2   thorpej 
   1847       1.1  riastrad /*
   1848  1.24.2.2   thorpej  * rnd_add_data_1(rs, buf, len, entropybits, flag)
   1849       1.1  riastrad  *
   1850       1.1  riastrad  *	Internal subroutine to call either entropy_enter_intr, if we're
   1851       1.1  riastrad  *	in interrupt context, or entropy_enter if not, and to count the
   1852       1.1  riastrad  *	entropy in an rndsource.
   1853       1.1  riastrad  */
   1854       1.1  riastrad static void
   1855       1.1  riastrad rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
   1856  1.24.2.2   thorpej     uint32_t entropybits, uint32_t flag)
   1857       1.1  riastrad {
   1858       1.1  riastrad 	bool fullyused;
   1859       1.1  riastrad 
   1860       1.1  riastrad 	/*
   1861       1.1  riastrad 	 * If we're in interrupt context, use entropy_enter_intr and
   1862       1.1  riastrad 	 * take note of whether it consumed the full sample; if not,
   1863       1.1  riastrad 	 * use entropy_enter, which always consumes the full sample.
   1864       1.1  riastrad 	 */
   1865      1.16  riastrad 	if (curlwp && cpu_intr_p()) {
   1866       1.1  riastrad 		fullyused = entropy_enter_intr(buf, len, entropybits);
   1867       1.1  riastrad 	} else {
   1868       1.1  riastrad 		entropy_enter(buf, len, entropybits);
   1869       1.1  riastrad 		fullyused = true;
   1870       1.1  riastrad 	}
   1871       1.1  riastrad 
   1872       1.1  riastrad 	/*
   1873       1.1  riastrad 	 * If we used the full sample, note how many bits were
   1874       1.1  riastrad 	 * contributed from this source.
   1875       1.1  riastrad 	 */
   1876       1.1  riastrad 	if (fullyused) {
   1877       1.1  riastrad 		if (E->stage < ENTROPY_HOT) {
   1878       1.1  riastrad 			if (E->stage >= ENTROPY_WARM)
   1879       1.1  riastrad 				mutex_enter(&E->lock);
   1880  1.24.2.2   thorpej 			rs->total = add_sat(rs->total, entropybits);
   1881  1.24.2.2   thorpej 			switch (flag) {
   1882  1.24.2.2   thorpej 			case RND_FLAG_COLLECT_TIME:
   1883  1.24.2.2   thorpej 				rs->time_delta.insamples =
   1884  1.24.2.2   thorpej 				    add_sat(rs->time_delta.insamples, 1);
   1885  1.24.2.2   thorpej 				break;
   1886  1.24.2.2   thorpej 			case RND_FLAG_COLLECT_VALUE:
   1887  1.24.2.2   thorpej 				rs->value_delta.insamples =
   1888  1.24.2.2   thorpej 				    add_sat(rs->value_delta.insamples, 1);
   1889  1.24.2.2   thorpej 				break;
   1890  1.24.2.2   thorpej 			}
   1891       1.1  riastrad 			if (E->stage >= ENTROPY_WARM)
   1892       1.1  riastrad 				mutex_exit(&E->lock);
   1893       1.1  riastrad 		} else {
   1894       1.1  riastrad 			struct rndsource_cpu *rc = percpu_getref(rs->state);
   1895       1.1  riastrad 
   1896  1.24.2.2   thorpej 			atomic_store_relaxed(&rc->rc_entropybits,
   1897  1.24.2.2   thorpej 			    add_sat(rc->rc_entropybits, entropybits));
   1898  1.24.2.2   thorpej 			switch (flag) {
   1899  1.24.2.2   thorpej 			case RND_FLAG_COLLECT_TIME:
   1900  1.24.2.2   thorpej 				atomic_store_relaxed(&rc->rc_timesamples,
   1901  1.24.2.2   thorpej 				    add_sat(rc->rc_timesamples, 1));
   1902  1.24.2.2   thorpej 				break;
   1903  1.24.2.2   thorpej 			case RND_FLAG_COLLECT_VALUE:
   1904  1.24.2.2   thorpej 				atomic_store_relaxed(&rc->rc_datasamples,
   1905  1.24.2.2   thorpej 				    add_sat(rc->rc_datasamples, 1));
   1906  1.24.2.2   thorpej 				break;
   1907  1.24.2.2   thorpej 			}
   1908       1.1  riastrad 			percpu_putref(rs->state);
   1909       1.1  riastrad 		}
   1910       1.1  riastrad 	}
   1911       1.1  riastrad }
   1912       1.1  riastrad 
   1913       1.1  riastrad /*
   1914       1.1  riastrad  * rnd_add_data_sync(rs, buf, len, entropybits)
   1915       1.1  riastrad  *
   1916       1.1  riastrad  *	Same as rnd_add_data.  Originally used in rndsource callbacks,
   1917       1.1  riastrad  *	to break an unnecessary cycle; no longer really needed.
   1918       1.1  riastrad  */
   1919       1.1  riastrad void
   1920       1.1  riastrad rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
   1921       1.1  riastrad     uint32_t entropybits)
   1922       1.1  riastrad {
   1923       1.1  riastrad 
   1924       1.1  riastrad 	rnd_add_data(rs, buf, len, entropybits);
   1925       1.1  riastrad }
   1926       1.1  riastrad 
   1927       1.1  riastrad /*
   1928       1.1  riastrad  * rndsource_entropybits(rs)
   1929       1.1  riastrad  *
   1930       1.1  riastrad  *	Return approximately the number of bits of entropy that have
   1931       1.1  riastrad  *	been contributed via rs so far.  Approximate if other CPUs may
   1932       1.1  riastrad  *	be calling rnd_add_data concurrently.
   1933       1.1  riastrad  */
   1934       1.1  riastrad static unsigned
   1935       1.1  riastrad rndsource_entropybits(struct krndsource *rs)
   1936       1.1  riastrad {
   1937       1.1  riastrad 	unsigned nbits = rs->total;
   1938       1.1  riastrad 
   1939       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   1940       1.4  riastrad 	KASSERT(rnd_sources_locked());
   1941       1.1  riastrad 	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
   1942       1.1  riastrad 	return nbits;
   1943       1.1  riastrad }
   1944       1.1  riastrad 
   1945       1.1  riastrad static void
   1946       1.1  riastrad rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
   1947       1.1  riastrad {
   1948       1.1  riastrad 	struct rndsource_cpu *rc = ptr;
   1949       1.1  riastrad 	unsigned *nbitsp = cookie;
   1950       1.1  riastrad 	unsigned cpu_nbits;
   1951       1.1  riastrad 
   1952  1.24.2.2   thorpej 	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
   1953       1.1  riastrad 	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
   1954       1.1  riastrad }
   1955       1.1  riastrad 
   1956       1.1  riastrad /*
   1957       1.1  riastrad  * rndsource_to_user(rs, urs)
   1958       1.1  riastrad  *
   1959       1.1  riastrad  *	Copy a description of rs out to urs for userland.
   1960       1.1  riastrad  */
   1961       1.1  riastrad static void
   1962       1.1  riastrad rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
   1963       1.1  riastrad {
   1964       1.1  riastrad 
   1965       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   1966       1.4  riastrad 	KASSERT(rnd_sources_locked());
   1967       1.1  riastrad 
   1968       1.1  riastrad 	/* Avoid kernel memory disclosure.  */
   1969       1.1  riastrad 	memset(urs, 0, sizeof(*urs));
   1970       1.1  riastrad 
   1971       1.1  riastrad 	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
   1972       1.1  riastrad 	strlcpy(urs->name, rs->name, sizeof(urs->name));
   1973       1.1  riastrad 	urs->total = rndsource_entropybits(rs);
   1974       1.1  riastrad 	urs->type = rs->type;
   1975       1.1  riastrad 	urs->flags = atomic_load_relaxed(&rs->flags);
   1976       1.1  riastrad }
   1977       1.1  riastrad 
   1978       1.1  riastrad /*
   1979       1.1  riastrad  * rndsource_to_user_est(rs, urse)
   1980       1.1  riastrad  *
   1981       1.1  riastrad  *	Copy a description of rs and estimation statistics out to urse
   1982       1.1  riastrad  *	for userland.
   1983       1.1  riastrad  */
   1984       1.1  riastrad static void
   1985       1.1  riastrad rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
   1986       1.1  riastrad {
   1987       1.1  riastrad 
   1988       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   1989       1.4  riastrad 	KASSERT(rnd_sources_locked());
   1990       1.1  riastrad 
   1991       1.1  riastrad 	/* Avoid kernel memory disclosure.  */
   1992       1.1  riastrad 	memset(urse, 0, sizeof(*urse));
   1993       1.1  riastrad 
   1994       1.1  riastrad 	/* Copy out the rndsource description.  */
   1995       1.1  riastrad 	rndsource_to_user(rs, &urse->rt);
   1996       1.1  riastrad 
   1997  1.24.2.2   thorpej 	/* Gather the statistics.  */
   1998  1.24.2.2   thorpej 	urse->dt_samples = rs->time_delta.insamples;
   1999       1.1  riastrad 	urse->dt_total = 0;
   2000  1.24.2.2   thorpej 	urse->dv_samples = rs->value_delta.insamples;
   2001  1.24.2.2   thorpej 	urse->dv_total = urse->rt.total;
   2002  1.24.2.2   thorpej 	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
   2003  1.24.2.2   thorpej }
   2004  1.24.2.2   thorpej 
   2005  1.24.2.2   thorpej static void
   2006  1.24.2.2   thorpej rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
   2007  1.24.2.2   thorpej {
   2008  1.24.2.2   thorpej 	struct rndsource_cpu *rc = ptr;
   2009  1.24.2.2   thorpej 	rndsource_est_t *urse = cookie;
   2010  1.24.2.2   thorpej 
   2011  1.24.2.2   thorpej 	urse->dt_samples = add_sat(urse->dt_samples,
   2012  1.24.2.2   thorpej 	    atomic_load_relaxed(&rc->rc_timesamples));
   2013  1.24.2.2   thorpej 	urse->dv_samples = add_sat(urse->dv_samples,
   2014  1.24.2.2   thorpej 	    atomic_load_relaxed(&rc->rc_datasamples));
   2015       1.1  riastrad }
   2016       1.1  riastrad 
   2017       1.1  riastrad /*
   2018      1.21  riastrad  * entropy_reset_xc(arg1, arg2)
   2019      1.21  riastrad  *
   2020      1.21  riastrad  *	Reset the current CPU's pending entropy to zero.
   2021      1.21  riastrad  */
   2022      1.21  riastrad static void
   2023      1.21  riastrad entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
   2024      1.21  riastrad {
   2025      1.21  riastrad 	uint32_t extra = entropy_timer();
   2026      1.21  riastrad 	struct entropy_cpu *ec;
   2027      1.21  riastrad 	int s;
   2028      1.21  riastrad 
   2029      1.21  riastrad 	/*
   2030      1.21  riastrad 	 * Acquire the per-CPU state, blocking soft interrupts and
   2031      1.21  riastrad 	 * causing hard interrupts to drop samples on the floor.
   2032      1.21  riastrad 	 */
   2033      1.21  riastrad 	ec = percpu_getref(entropy_percpu);
   2034      1.21  riastrad 	s = splsoftserial();
   2035      1.21  riastrad 	KASSERT(!ec->ec_locked);
   2036      1.21  riastrad 	ec->ec_locked = true;
   2037      1.21  riastrad 	__insn_barrier();
   2038      1.21  riastrad 
   2039      1.21  riastrad 	/* Zero the pending count and enter a cycle count for fun.  */
   2040      1.21  riastrad 	ec->ec_pending = 0;
   2041      1.21  riastrad 	entpool_enter(ec->ec_pool, &extra, sizeof extra);
   2042      1.21  riastrad 
   2043      1.21  riastrad 	/* Release the per-CPU state.  */
   2044      1.21  riastrad 	KASSERT(ec->ec_locked);
   2045      1.21  riastrad 	__insn_barrier();
   2046      1.21  riastrad 	ec->ec_locked = false;
   2047      1.21  riastrad 	splx(s);
   2048      1.21  riastrad 	percpu_putref(entropy_percpu);
   2049      1.21  riastrad }
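
                             /*
                              * Usage sketch: the RNDCTL handler below triggers this on all
                              * CPUs via xc_broadcast(0, &entropy_reset_xc, NULL, NULL).  A
                              * caller that needed to observe completion before proceeding
                              * would block on the returned cross-call ticket, e.g.:
                              *
                              *	xc_wait(xc_broadcast(0, &entropy_reset_xc, NULL, NULL));
                              */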
   2050      1.21  riastrad 
   2051      1.21  riastrad /*
   2052       1.1  riastrad  * entropy_ioctl(cmd, data)
   2053       1.1  riastrad  *
   2054       1.1  riastrad  *	Handle various /dev/random ioctl queries.
   2055       1.1  riastrad  */
   2056       1.1  riastrad int
   2057       1.1  riastrad entropy_ioctl(unsigned long cmd, void *data)
   2058       1.1  riastrad {
   2059       1.1  riastrad 	struct krndsource *rs;
    2060       1.1  riastrad 	bool privileged = false;
   2061       1.1  riastrad 	int error;
   2062       1.1  riastrad 
   2063       1.1  riastrad 	KASSERT(E->stage >= ENTROPY_WARM);
   2064       1.1  riastrad 
   2065       1.1  riastrad 	/* Verify user's authorization to perform the ioctl.  */
   2066       1.1  riastrad 	switch (cmd) {
   2067       1.1  riastrad 	case RNDGETENTCNT:
   2068       1.1  riastrad 	case RNDGETPOOLSTAT:
   2069       1.1  riastrad 	case RNDGETSRCNUM:
   2070       1.1  riastrad 	case RNDGETSRCNAME:
   2071       1.1  riastrad 	case RNDGETESTNUM:
   2072       1.1  riastrad 	case RNDGETESTNAME:
   2073       1.1  riastrad 		error = kauth_authorize_device(curlwp->l_cred,
   2074       1.1  riastrad 		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
   2075       1.1  riastrad 		break;
   2076       1.1  riastrad 	case RNDCTL:
   2077       1.1  riastrad 		error = kauth_authorize_device(curlwp->l_cred,
   2078       1.1  riastrad 		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
   2079       1.1  riastrad 		break;
   2080       1.1  riastrad 	case RNDADDDATA:
   2081       1.1  riastrad 		error = kauth_authorize_device(curlwp->l_cred,
   2082       1.1  riastrad 		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
   2083       1.1  riastrad 		/* Ascertain whether the user's inputs should be counted.  */
   2084       1.1  riastrad 		if (kauth_authorize_device(curlwp->l_cred,
   2085       1.1  riastrad 			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
   2086       1.1  riastrad 			NULL, NULL, NULL, NULL) == 0)
   2087       1.1  riastrad 			privileged = true;
   2088       1.1  riastrad 		break;
   2089       1.1  riastrad 	default: {
   2090       1.1  riastrad 		/*
   2091       1.1  riastrad 		 * XXX Hack to avoid changing module ABI so this can be
   2092       1.1  riastrad 		 * pulled up.  Later, we can just remove the argument.
   2093       1.1  riastrad 		 */
   2094       1.1  riastrad 		static const struct fileops fops = {
   2095       1.1  riastrad 			.fo_ioctl = rnd_system_ioctl,
   2096       1.1  riastrad 		};
   2097       1.1  riastrad 		struct file f = {
   2098       1.1  riastrad 			.f_ops = &fops,
   2099       1.1  riastrad 		};
   2100       1.1  riastrad 		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
   2101       1.1  riastrad 		    enosys(), error);
   2102       1.1  riastrad #if defined(_LP64)
   2103       1.1  riastrad 		if (error == ENOSYS)
   2104       1.1  riastrad 			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
   2105       1.1  riastrad 			    enosys(), error);
   2106       1.1  riastrad #endif
   2107       1.1  riastrad 		if (error == ENOSYS)
   2108       1.1  riastrad 			error = ENOTTY;
   2109       1.1  riastrad 		break;
   2110       1.1  riastrad 	}
   2111       1.1  riastrad 	}
   2112       1.1  riastrad 
   2113       1.1  riastrad 	/* If anything went wrong with authorization, stop here.  */
   2114       1.1  riastrad 	if (error)
   2115       1.1  riastrad 		return error;
   2116       1.1  riastrad 
   2117       1.1  riastrad 	/* Dispatch on the command.  */
   2118       1.1  riastrad 	switch (cmd) {
   2119       1.1  riastrad 	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
   2120       1.1  riastrad 		uint32_t *countp = data;
   2121       1.1  riastrad 
   2122       1.1  riastrad 		mutex_enter(&E->lock);
   2123       1.1  riastrad 		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
   2124       1.1  riastrad 		mutex_exit(&E->lock);
   2125       1.1  riastrad 
   2126       1.1  riastrad 		break;
   2127       1.1  riastrad 	}
   2128       1.1  riastrad 	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
   2129       1.1  riastrad 		rndpoolstat_t *pstat = data;
   2130       1.1  riastrad 
   2131       1.1  riastrad 		mutex_enter(&E->lock);
   2132       1.1  riastrad 
   2133       1.1  riastrad 		/* parameters */
   2134       1.1  riastrad 		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
   2135       1.1  riastrad 		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
   2136       1.1  riastrad 		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
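                             		/*
                              		 * Worked units: if ENTROPY_CAPACITY is 32 bytes
                              		 * (assumed here, matching a 256-bit pool), then
                              		 * threshold = 32 bytes and maxentropy = 32*NBBY =
                              		 * 256 bits; poolsize is counted in 32-bit words.
                              		 */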
   2137       1.1  riastrad 
   2138       1.1  riastrad 		/* state */
   2139       1.1  riastrad 		pstat->added = 0; /* XXX total entropy_enter count */
   2140       1.1  riastrad 		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
   2141       1.1  riastrad 		pstat->removed = 0; /* XXX total entropy_extract count */
   2142       1.1  riastrad 		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
   2143       1.1  riastrad 		pstat->generated = 0; /* XXX bits of data...fabricated? */
   2144       1.1  riastrad 
   2145       1.1  riastrad 		mutex_exit(&E->lock);
   2146       1.1  riastrad 		break;
   2147       1.1  riastrad 	}
   2148       1.1  riastrad 	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
   2149       1.1  riastrad 		rndstat_t *stat = data;
   2150       1.1  riastrad 		uint32_t start = 0, i = 0;
   2151       1.1  riastrad 
   2152       1.1  riastrad 		/* Skip if none requested; fail if too many requested.  */
   2153       1.1  riastrad 		if (stat->count == 0)
   2154       1.1  riastrad 			break;
   2155       1.1  riastrad 		if (stat->count > RND_MAXSTATCOUNT)
   2156       1.1  riastrad 			return EINVAL;
   2157       1.1  riastrad 
   2158       1.1  riastrad 		/*
   2159       1.1  riastrad 		 * Under the lock, find the first one, copy out as many
   2160       1.1  riastrad 		 * as requested, and report how many we copied out.
   2161       1.1  riastrad 		 */
   2162       1.1  riastrad 		mutex_enter(&E->lock);
   2163       1.4  riastrad 		error = rnd_lock_sources();
   2164       1.4  riastrad 		if (error) {
   2165       1.4  riastrad 			mutex_exit(&E->lock);
   2166       1.4  riastrad 			return error;
   2167       1.4  riastrad 		}
   2168       1.1  riastrad 		LIST_FOREACH(rs, &E->sources, list) {
   2169       1.1  riastrad 			if (start++ == stat->start)
   2170       1.1  riastrad 				break;
   2171       1.1  riastrad 		}
   2172       1.1  riastrad 		while (i < stat->count && rs != NULL) {
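                             			/*
                              			 * Drop E->lock around the copy-out: the
                              			 * estimate variant of this loop cross-calls
                              			 * via percpu_foreach, which presumably must
                              			 * not happen under this lock, and the
                              			 * sources list stays stable meanwhile
                              			 * because we hold rnd_lock_sources.
                              			 */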
   2173       1.5  riastrad 			mutex_exit(&E->lock);
   2174       1.1  riastrad 			rndsource_to_user(rs, &stat->source[i++]);
   2175       1.5  riastrad 			mutex_enter(&E->lock);
   2176       1.1  riastrad 			rs = LIST_NEXT(rs, list);
   2177       1.1  riastrad 		}
   2178       1.1  riastrad 		KASSERT(i <= stat->count);
   2179       1.1  riastrad 		stat->count = i;
   2180       1.4  riastrad 		rnd_unlock_sources();
   2181       1.1  riastrad 		mutex_exit(&E->lock);
   2182       1.1  riastrad 		break;
   2183       1.1  riastrad 	}
   2184       1.1  riastrad 	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
   2185       1.1  riastrad 		rndstat_est_t *estat = data;
   2186       1.1  riastrad 		uint32_t start = 0, i = 0;
   2187       1.1  riastrad 
   2188       1.1  riastrad 		/* Skip if none requested; fail if too many requested.  */
   2189       1.1  riastrad 		if (estat->count == 0)
   2190       1.1  riastrad 			break;
   2191       1.1  riastrad 		if (estat->count > RND_MAXSTATCOUNT)
   2192       1.1  riastrad 			return EINVAL;
   2193       1.1  riastrad 
   2194       1.1  riastrad 		/*
   2195       1.1  riastrad 		 * Under the lock, find the first one, copy out as many
   2196       1.1  riastrad 		 * as requested, and report how many we copied out.
   2197       1.1  riastrad 		 */
   2198       1.1  riastrad 		mutex_enter(&E->lock);
   2199       1.4  riastrad 		error = rnd_lock_sources();
   2200       1.4  riastrad 		if (error) {
   2201       1.4  riastrad 			mutex_exit(&E->lock);
   2202       1.4  riastrad 			return error;
   2203       1.4  riastrad 		}
   2204       1.1  riastrad 		LIST_FOREACH(rs, &E->sources, list) {
   2205       1.1  riastrad 			if (start++ == estat->start)
   2206       1.1  riastrad 				break;
   2207       1.1  riastrad 		}
   2208       1.1  riastrad 		while (i < estat->count && rs != NULL) {
   2209       1.4  riastrad 			mutex_exit(&E->lock);
   2210       1.1  riastrad 			rndsource_to_user_est(rs, &estat->source[i++]);
   2211       1.4  riastrad 			mutex_enter(&E->lock);
   2212       1.1  riastrad 			rs = LIST_NEXT(rs, list);
   2213       1.1  riastrad 		}
   2214       1.1  riastrad 		KASSERT(i <= estat->count);
   2215       1.1  riastrad 		estat->count = i;
   2216       1.4  riastrad 		rnd_unlock_sources();
   2217       1.1  riastrad 		mutex_exit(&E->lock);
   2218       1.1  riastrad 		break;
   2219       1.1  riastrad 	}
   2220       1.1  riastrad 	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
   2221       1.1  riastrad 		rndstat_name_t *nstat = data;
   2222       1.1  riastrad 		const size_t n = sizeof(rs->name);
   2223       1.1  riastrad 
   2224       1.1  riastrad 		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
   2225       1.1  riastrad 
   2226       1.1  riastrad 		/*
   2227       1.1  riastrad 		 * Under the lock, search by name.  If found, copy it
   2228       1.1  riastrad 		 * out; if not found, fail with ENOENT.
   2229       1.1  riastrad 		 */
   2230       1.1  riastrad 		mutex_enter(&E->lock);
   2231       1.4  riastrad 		error = rnd_lock_sources();
   2232       1.4  riastrad 		if (error) {
   2233       1.4  riastrad 			mutex_exit(&E->lock);
   2234       1.4  riastrad 			return error;
   2235       1.4  riastrad 		}
   2236       1.1  riastrad 		LIST_FOREACH(rs, &E->sources, list) {
   2237       1.1  riastrad 			if (strncmp(rs->name, nstat->name, n) == 0)
   2238       1.1  riastrad 				break;
   2239       1.1  riastrad 		}
   2240       1.4  riastrad 		if (rs != NULL) {
   2241       1.4  riastrad 			mutex_exit(&E->lock);
   2242       1.1  riastrad 			rndsource_to_user(rs, &nstat->source);
   2243       1.4  riastrad 			mutex_enter(&E->lock);
   2244       1.4  riastrad 		} else {
   2245       1.1  riastrad 			error = ENOENT;
   2246       1.4  riastrad 		}
   2247       1.4  riastrad 		rnd_unlock_sources();
   2248       1.1  riastrad 		mutex_exit(&E->lock);
   2249       1.1  riastrad 		break;
   2250       1.1  riastrad 	}
   2251       1.1  riastrad 	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
   2252       1.1  riastrad 		rndstat_est_name_t *enstat = data;
   2253       1.1  riastrad 		const size_t n = sizeof(rs->name);
   2254       1.1  riastrad 
   2255       1.1  riastrad 		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
   2256       1.1  riastrad 
   2257       1.1  riastrad 		/*
   2258       1.1  riastrad 		 * Under the lock, search by name.  If found, copy it
   2259       1.1  riastrad 		 * out; if not found, fail with ENOENT.
   2260       1.1  riastrad 		 */
   2261       1.1  riastrad 		mutex_enter(&E->lock);
   2262       1.4  riastrad 		error = rnd_lock_sources();
   2263       1.4  riastrad 		if (error) {
   2264       1.4  riastrad 			mutex_exit(&E->lock);
   2265       1.4  riastrad 			return error;
   2266       1.4  riastrad 		}
   2267       1.1  riastrad 		LIST_FOREACH(rs, &E->sources, list) {
   2268       1.1  riastrad 			if (strncmp(rs->name, enstat->name, n) == 0)
   2269       1.1  riastrad 				break;
   2270       1.1  riastrad 		}
   2271       1.4  riastrad 		if (rs != NULL) {
   2272       1.4  riastrad 			mutex_exit(&E->lock);
   2273       1.1  riastrad 			rndsource_to_user_est(rs, &enstat->source);
   2274       1.4  riastrad 			mutex_enter(&E->lock);
   2275       1.4  riastrad 		} else {
   2276       1.1  riastrad 			error = ENOENT;
   2277       1.4  riastrad 		}
   2278       1.4  riastrad 		rnd_unlock_sources();
   2279       1.1  riastrad 		mutex_exit(&E->lock);
   2280       1.1  riastrad 		break;
   2281       1.1  riastrad 	}
   2282       1.1  riastrad 	case RNDCTL: {		/* Modify entropy source flags.  */
   2283       1.1  riastrad 		rndctl_t *rndctl = data;
   2284       1.1  riastrad 		const size_t n = sizeof(rs->name);
   2285      1.21  riastrad 		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
   2286       1.1  riastrad 		uint32_t flags;
   2287      1.21  riastrad 		bool reset = false, request = false;
   2288       1.1  riastrad 
   2289       1.1  riastrad 		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
   2290       1.1  riastrad 
    2291       1.1  riastrad 		/* Whitelist the flags that the user can change.  */
   2292       1.1  riastrad 		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
   2293       1.1  riastrad 
   2294       1.1  riastrad 		/*
   2295       1.1  riastrad 		 * For each matching rndsource, either by type if
   2296       1.1  riastrad 		 * specified or by name if not, set the masked flags.
   2297       1.1  riastrad 		 */
   2298       1.1  riastrad 		mutex_enter(&E->lock);
   2299       1.1  riastrad 		LIST_FOREACH(rs, &E->sources, list) {
   2300       1.1  riastrad 			if (rndctl->type != 0xff) {
   2301       1.1  riastrad 				if (rs->type != rndctl->type)
   2302       1.1  riastrad 					continue;
   2303       1.1  riastrad 			} else {
   2304       1.1  riastrad 				if (strncmp(rs->name, rndctl->name, n) != 0)
   2305       1.1  riastrad 					continue;
   2306       1.1  riastrad 			}
   2307       1.1  riastrad 			flags = rs->flags & ~rndctl->mask;
   2308       1.1  riastrad 			flags |= rndctl->flags & rndctl->mask;
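                             			/*
                              			 * Masked update: bits set in rndctl->mask
                              			 * take their value from rndctl->flags and
                              			 * all other bits are preserved.  E.g., mask =
                              			 * flags = RND_FLAG_NO_COLLECT disables
                              			 * collection without touching
                              			 * RND_FLAG_NO_ESTIMATE.
                              			 */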
   2309      1.21  riastrad 			if ((rs->flags & resetflags) == 0 &&
   2310      1.21  riastrad 			    (flags & resetflags) != 0)
   2311      1.21  riastrad 				reset = true;
   2312      1.21  riastrad 			if ((rs->flags ^ flags) & resetflags)
   2313      1.21  riastrad 				request = true;
   2314       1.1  riastrad 			atomic_store_relaxed(&rs->flags, flags);
   2315       1.1  riastrad 		}
   2316       1.1  riastrad 		mutex_exit(&E->lock);
   2317      1.21  riastrad 
   2318      1.21  riastrad 		/*
   2319      1.21  riastrad 		 * If we disabled estimation or collection, nix all the
   2320      1.21  riastrad 		 * pending entropy and set needed to the maximum.
   2321      1.21  riastrad 		 */
   2322      1.21  riastrad 		if (reset) {
   2323      1.21  riastrad 			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
   2324      1.21  riastrad 			mutex_enter(&E->lock);
   2325      1.21  riastrad 			E->pending = 0;
   2326      1.21  riastrad 			atomic_store_relaxed(&E->needed,
   2327      1.21  riastrad 			    ENTROPY_CAPACITY*NBBY);
   2328      1.21  riastrad 			mutex_exit(&E->lock);
   2329      1.21  riastrad 		}
   2330      1.21  riastrad 
   2331      1.21  riastrad 		/*
   2332      1.21  riastrad 		 * If we changed any of the estimation or collection
   2333      1.21  riastrad 		 * flags, request new samples from everyone -- either
   2334      1.21  riastrad 		 * to make up for what we just lost, or to get new
   2335      1.21  riastrad 		 * samples from what we just added.
   2336      1.21  riastrad 		 */
   2337      1.21  riastrad 		if (request) {
   2338      1.21  riastrad 			mutex_enter(&E->lock);
   2339      1.21  riastrad 			entropy_request(ENTROPY_CAPACITY);
   2340      1.21  riastrad 			mutex_exit(&E->lock);
   2341      1.21  riastrad 		}
   2342       1.1  riastrad 		break;
   2343       1.1  riastrad 	}
   2344       1.1  riastrad 	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
   2345       1.1  riastrad 		rnddata_t *rdata = data;
   2346       1.1  riastrad 		unsigned entropybits = 0;
   2347       1.1  riastrad 
   2348       1.1  riastrad 		if (!atomic_load_relaxed(&entropy_collection))
   2349       1.1  riastrad 			break;	/* thanks but no thanks */
   2350       1.1  riastrad 		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
   2351       1.1  riastrad 			return EINVAL;
   2352       1.1  riastrad 
   2353       1.1  riastrad 		/*
    2354       1.1  riastrad 		 * This ioctl serves as the userland alternative to a
   2355       1.1  riastrad 		 * bootloader-provided seed -- typically furnished by
   2356       1.1  riastrad 		 * /etc/rc.d/random_seed.  We accept the user's entropy
   2357       1.1  riastrad 		 * claim only if
   2358       1.1  riastrad 		 *
   2359       1.1  riastrad 		 * (a) the user is privileged, and
   2360       1.1  riastrad 		 * (b) we have not entered a bootloader seed.
   2361       1.1  riastrad 		 *
   2362       1.1  riastrad 		 * under the assumption that the user may use this to
   2363       1.1  riastrad 		 * load a seed from disk that we have already loaded
   2364       1.1  riastrad 		 * from the bootloader, so we don't double-count it.
   2365       1.1  riastrad 		 */
   2366      1.11  riastrad 		if (privileged && rdata->entropy && rdata->len) {
   2367       1.1  riastrad 			mutex_enter(&E->lock);
   2368       1.1  riastrad 			if (!E->seeded) {
   2369       1.1  riastrad 				entropybits = MIN(rdata->entropy,
   2370       1.1  riastrad 				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
   2371       1.1  riastrad 				E->seeded = true;
   2372       1.1  riastrad 			}
   2373       1.1  riastrad 			mutex_exit(&E->lock);
   2374       1.1  riastrad 		}
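                             		/*
                              		 * E.g., a 512-byte seed claiming 4096 bits is
                              		 * clamped above to MIN(512, ENTROPY_CAPACITY)*NBBY
                              		 * bits (256 bits if ENTROPY_CAPACITY is 32 bytes),
                              		 * so no seed can claim more entropy than the pool
                              		 * can hold.
                              		 */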
   2375       1.1  riastrad 
   2376      1.13  riastrad 		/* Enter the data and consolidate entropy.  */
   2377       1.1  riastrad 		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
   2378       1.1  riastrad 		    entropybits);
   2379      1.13  riastrad 		entropy_consolidate();
   2380       1.1  riastrad 		break;
   2381       1.1  riastrad 	}
   2382       1.1  riastrad 	default:
   2383       1.1  riastrad 		error = ENOTTY;
   2384       1.1  riastrad 	}
   2385       1.1  riastrad 
   2386       1.1  riastrad 	/* Return any error that may have come up.  */
   2387       1.1  riastrad 	return error;
   2388       1.1  riastrad }
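
                             /*
                              * Hypothetical userland usage of the ioctls above (not part of
                              * this file; the header and device names follow rnd(4) and are
                              * assumptions of this sketch, which exercises only the
                              * RNDGETENTCNT path; that query needs KAUTH_DEVICE_RND_GETPRIV,
                              * so it is typically run as root):
                              *
                              *	#include <sys/ioctl.h>
                              *	#include <sys/rndio.h>
                              *
                              *	#include <err.h>
                              *	#include <fcntl.h>
                              *	#include <inttypes.h>
                              *	#include <stdio.h>
                              *
                              *	int
                              *	main(void)
                              *	{
                              *		uint32_t bits;
                              *		int fd;
                              *
                              *		if ((fd = open("/dev/urandom", O_RDONLY)) == -1)
                              *			err(1, "open");
                              *		if (ioctl(fd, RNDGETENTCNT, &bits) == -1)
                              *			err(1, "RNDGETENTCNT");
                              *		printf("%" PRIu32 " bits of entropy\n", bits);
                              *		return 0;
                              *	}
                              */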
   2389       1.1  riastrad 
   2390       1.1  riastrad /* Legacy entry points */
   2391       1.1  riastrad 
   2392       1.1  riastrad void
   2393       1.1  riastrad rnd_seed(void *seed, size_t len)
   2394       1.1  riastrad {
   2395       1.1  riastrad 
   2396       1.1  riastrad 	if (len != sizeof(rndsave_t)) {
   2397       1.1  riastrad 		printf("entropy: invalid seed length: %zu,"
   2398       1.1  riastrad 		    " expected sizeof(rndsave_t) = %zu\n",
   2399       1.1  riastrad 		    len, sizeof(rndsave_t));
   2400       1.1  riastrad 		return;
   2401       1.1  riastrad 	}
   2402       1.1  riastrad 	entropy_seed(seed);
   2403       1.1  riastrad }
   2404       1.1  riastrad 
   2405       1.1  riastrad void
   2406       1.1  riastrad rnd_init(void)
   2407       1.1  riastrad {
   2408       1.1  riastrad 
   2409       1.1  riastrad 	entropy_init();
   2410       1.1  riastrad }
   2411       1.1  riastrad 
   2412       1.1  riastrad void
   2413       1.1  riastrad rnd_init_softint(void)
   2414       1.1  riastrad {
   2415       1.1  riastrad 
   2416       1.1  riastrad 	entropy_init_late();
   2417       1.1  riastrad }
   2418       1.1  riastrad 
   2419       1.1  riastrad int
   2420       1.1  riastrad rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
   2421       1.1  riastrad {
   2422       1.1  riastrad 
   2423       1.1  riastrad 	return entropy_ioctl(cmd, data);
   2424       1.1  riastrad }
   2425