/*	$NetBSD: kern_entropy.c,v 1.73 2025/03/11 14:30:28 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 *
 *	* While cold, a single global entropy pool is available for
 *	  entering and extracting, serialized through splhigh/splx.
 *	  The per-CPU entropy pool data structures are initialized in
 *	  entropy_init and entropy_init_late (separated mainly for
 *	  hysterical raisins at this point), but are not used until the
 *	  system is warm, at which point access to the global entropy
 *	  pool is limited to thread and softint context and serialized
 *	  by E->lock.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.73 2025/03/11 14:30:28 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/reboot.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

#define	MINENTROPYBYTES	ENTROPY_CAPACITY
#define	MINENTROPYBITS	(MINENTROPYBYTES*NBBY)
#define	MINSAMPLES	(2*MINENTROPYBITS)
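
/*
 * Worked example of the constants above (a sketch; assumes
 * ENTROPY_CAPACITY is 32 bytes, the usual entpool(9) capacity):
 * MINENTROPYBYTES = 32, so MINENTROPYBITS = 32*8 = 256, and a system
 * whose sources cannot credit entropy bits must instead accumulate
 * MINSAMPLES = 2*256 = 512 timing samples before the best-effort
 * path is satisfied.
 */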

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct entropy_cpu_evcnt {
		struct evcnt	softint;
		struct evcnt	intrdrop;
		struct evcnt	intrtrunc;
	}			*ec_evcnt;
	struct entpool		*ec_pool;
	unsigned		ec_bitspending;
	unsigned		ec_samplespending;
	bool			ec_locked;
};

/*
 * struct entropy_cpu_lock
 *
 *	State for locking the per-CPU entropy state.
 */
struct entropy_cpu_lock {
	int		ecl_s;
	long		ecl_pctr;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned	rc_entropybits;
	unsigned	rc_timesamples;
	unsigned	rc_datasamples;
	rnd_delta_t	rc_timedelta;
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	bitsneeded;	/* (A) needed globally */
	unsigned	bitspending;	/* pending in per-CPU pools */
	unsigned	samplesneeded;	/* (A) needed globally */
	unsigned	samplespending;	/* pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
	LIST_HEAD(,krndsource)	sources; /* list of entropy sources */
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.  */
	.bitsneeded = MINENTROPYBITS,
	.samplesneeded = MINSAMPLES,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
};

#define	E	(&entropy_global)	/* declutter */

/* Read-mostly globals */
static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
static void		*entropy_sih __read_mostly;	/* softint handler */
static struct lwp	*entropy_lwp __read_mostly;	/* housekeeping thread */

static struct krndsource seed_rndsource __read_mostly;

/*
 * Event counters
 *
 *	Must be careful with adding these because they can serve as
 *	side channels.
 */
static struct evcnt entropy_discretionary_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
static struct evcnt entropy_immediate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
static struct evcnt entropy_partial_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
static struct evcnt entropy_consolidate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
static struct evcnt entropy_extract_fail_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
static struct evcnt entropy_request_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
EVCNT_ATTACH_STATIC(entropy_request_evcnt);
static struct evcnt entropy_deplete_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
static struct evcnt entropy_notify_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
EVCNT_ATTACH_STATIC(entropy_notify_evcnt);

/* Sysctl knobs */
static bool	entropy_collection = 1;
static bool	entropy_depletion = 0; /* Silly!  */

static const struct sysctlnode	*entropy_sysctlroot;
static struct sysctllog		*entropy_sysctllog;

/* Forward declarations */
static void	entropy_init_cpu(void *, void *, struct cpu_info *);
static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
static void	entropy_account_cpu(struct entropy_cpu *);
static void	entropy_enter(const void *, size_t, unsigned, bool);
static bool	entropy_enter_intr(const void *, size_t, unsigned, bool);
static void	entropy_softintr(void *);
static void	entropy_thread(void *);
static bool	entropy_pending(void);
static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
static void	entropy_do_consolidate(void);
static void	entropy_consolidate_xc(void *, void *);
static void	entropy_notify(void);
static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
static void	filt_entropy_read_detach(struct knote *);
static int	filt_entropy_read_event(struct knote *, long);
static int	entropy_request(size_t, int);
static void	rnd_add_data_internal(struct krndsource *, const void *,
		    uint32_t, uint32_t, bool);
static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
		    uint32_t, bool, uint32_t, bool);
static unsigned	rndsource_entropybits(struct krndsource *);
static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
static void	rndsource_to_user(struct krndsource *, rndsource_t *);
static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);

/*
 * entropy_timer()
 *
 *	Cycle counter, time counter, or anything that changes a wee bit
 *	unpredictably.
 */
static inline uint32_t
entropy_timer(void)
{
	struct bintime bt;
	uint32_t v;

	/* If we have a CPU cycle counter, use the low 32 bits.  */
#ifdef __HAVE_CPU_COUNTER
	if (__predict_true(cpu_hascounter()))
		return cpu_counter32();
#endif	/* __HAVE_CPU_COUNTER */

	/* If we're cold, tough.  Can't binuptime while cold.  */
	if (__predict_false(cold))
		return 0;

	/* Fold the 128 bits of binuptime into 32 bits.  */
	binuptime(&bt);
	v = bt.frac;
	v ^= bt.frac >> 32;
	v ^= bt.sec;
	v ^= bt.sec >> 32;
	return v;
}
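
/*
 * Example of the sampling pattern used throughout this file (a
 * sketch; do_something_unpredictable is a stand-in, not a real
 * function): bracket an event with entropy_timer() samples and enter
 * the timings with zero bits of entropy credited, as entropy_init
 * does below, letting the accounting decide what the jitter is worth.
 *
 *	uint32_t extra[2];
 *	unsigned i = 0;
 *
 *	extra[i++] = entropy_timer();
 *	do_something_unpredictable();
 *	extra[i++] = entropy_timer();
 *	entropy_enter(extra, sizeof extra, 0, false);
 *	explicit_memset(extra, 0, sizeof extra);
 */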

static void
attach_seed_rndsource(void)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * First called no later than entropy_init, while we are still
	 * single-threaded, so no need for RUN_ONCE.
	 */
	if (E->seed_rndsource)
		return;

	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
	E->seed_rndsource = true;
}

/*
 * entropy_init()
 *
 *	Initialize the entropy subsystem.  Panic on failure.
 *
 *	Requires percpu(9) and sysctl(9) to be initialized.  Must run
 *	while cold.
 */
static void
entropy_init(void)
{
	uint32_t extra[2];
	struct krndsource *rs;
	unsigned i = 0;

	KASSERT(cold);

	/* Grab some cycle counts early at boot.  */
	extra[i++] = entropy_timer();

	/* Run the entropy pool cryptography self-test.  */
	if (entpool_selftest() == -1)
		panic("entropy pool crypto self-test failed");

	/* Create the sysctl directory.  */
	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
	    SYSCTL_DESCR("Entropy (random number sources) options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_ENTROPY, CTL_EOL);

	/* Create the sysctl knobs.  */
	/* XXX These shouldn't be writable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
	    SYSCTL_DESCR("Trigger entropy consolidation now"),
	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	/* XXX These should maybe not be readable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "needed",
	    SYSCTL_DESCR("Systemwide entropy deficit (bits of entropy)"),
	    NULL, 0, &E->bitsneeded, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "pending",
	    SYSCTL_DESCR("Number of bits of entropy pending on CPUs"),
	    NULL, 0, &E->bitspending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "samplesneeded",
	    SYSCTL_DESCR("Systemwide entropy deficit (samples)"),
	    NULL, 0, &E->samplesneeded, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "samplespending",
	    SYSCTL_DESCR("Number of samples pending on CPUs"),
	    NULL, 0, &E->samplespending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT,
	    "epoch", SYSCTL_DESCR("Entropy epoch"),
	    NULL, 0, &E->epoch, 0, KERN_ENTROPY_EPOCH, CTL_EOL);

	/* Initialize the global state for multithreaded operation.  */
	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
	cv_init(&E->cv, "entropy");
	selinit(&E->selq);
	cv_init(&E->sourcelock_cv, "entsrclock");

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Note if the bootloader didn't provide a seed.  */
	if (!E->seeded)
		aprint_debug("entropy: no seed from bootloader\n");

	/* Allocate the per-CPU records for all early entropy sources.  */
	LIST_FOREACH(rs, &E->sources, list)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));

	/* Allocate and initialize the per-CPU state.  */
	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
	    entropy_init_cpu, entropy_fini_cpu, NULL);

	/* Enter the boot cycle count to get started.  */
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, /*nbits*/0, /*count*/false);
	explicit_memset(extra, 0, sizeof extra);
}

/*
 * entropy_init_late()
 *
 *	Late initialization.  Panic on failure.
 *
 *	Requires CPUs to have been detected and LWPs to have started.
 *	Must run while cold.
 */
static void
entropy_init_late(void)
{
	int error;

	KASSERT(cold);

	/*
	 * Establish the softint at the highest softint priority level.
	 * Must happen after CPU detection.
	 */
	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    &entropy_softintr, NULL);
	if (entropy_sih == NULL)
		panic("unable to establish entropy softint");

	/*
	 * Create the entropy housekeeping thread.  Must happen after
	 * lwpinit.
	 */
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
	    entropy_thread, NULL, &entropy_lwp, "entbutler");
	if (error)
		panic("unable to create entropy housekeeping thread: %d",
		    error);
}

/*
 * entropy_init_cpu(ptr, cookie, ci)
 *
 *	percpu(9) constructor for per-CPU entropy pool.
 */
static void
entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;
	const char *cpuname;

	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
	ec->ec_bitspending = 0;
	ec->ec_samplespending = 0;
	ec->ec_locked = false;

	/* XXX ci_cpuname may not be initialized early enough.  */
	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy softint");
	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy intrdrop");
	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy intrtrunc");
}

/*
 * entropy_fini_cpu(ptr, cookie, ci)
 *
 *	percpu(9) destructor for per-CPU entropy pool.
 */
static void
entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;

	/*
	 * Zero any lingering data.  Disclosure of the per-CPU pool
	 * shouldn't retroactively affect the security of any keys
	 * generated, because entpool(9) erases whatever we have just
	 * drawn out of any pool, but better safe than sorry.
	 */
	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));

	evcnt_detach(&ec->ec_evcnt->intrtrunc);
	evcnt_detach(&ec->ec_evcnt->intrdrop);
	evcnt_detach(&ec->ec_evcnt->softint);

	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
}

/*
 * ec = entropy_cpu_get(&lock)
 * entropy_cpu_put(&lock, ec)
 *
 *	Lock and unlock the per-CPU entropy state.  This only prevents
 *	access on the same CPU -- by hard interrupts, by soft
 *	interrupts, or by other threads.
 *
 *	Blocks soft interrupts and preemption altogether; doesn't block
 *	hard interrupts, but causes samples in hard interrupts to be
 *	dropped.
 */
static struct entropy_cpu *
entropy_cpu_get(struct entropy_cpu_lock *lock)
{
	struct entropy_cpu *ec;

	ec = percpu_getref(entropy_percpu);
	lock->ecl_s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	lock->ecl_pctr = lwp_pctr();
	__insn_barrier();

	return ec;
}

static void
entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
{

	KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
	KASSERT(ec->ec_locked);

	__insn_barrier();
	KASSERT(lock->ecl_pctr == lwp_pctr());
	ec->ec_locked = false;
	splx(lock->ecl_s);
	percpu_putref(entropy_percpu);
}

/*
 * entropy_seed(seed)
 *
 *	Seed the entropy pool with seed.  Meant to be called as early
 *	as possible by the bootloader; may be called before or after
 *	entropy_init.  Must be called before system reaches userland.
 *	Must be called in thread or soft interrupt context, not in hard
 *	interrupt context.  Must be called at most once.
 *
 *	Overwrites the seed in place.  Caller may then free the memory.
 */
static void
entropy_seed(rndsave_t *seed)
{
	SHA1_CTX ctx;
	uint8_t digest[SHA1_DIGEST_LENGTH];
	bool seeded;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * Verify the checksum.  If the checksum fails, take the data
	 * but ignore the entropy estimate -- the file may have been
	 * incompletely written with garbage, which is harmless to add
	 * but may not be as unpredictable as alleged.
	 */
	SHA1Init(&ctx);
	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
	SHA1Update(&ctx, seed->data, sizeof(seed->data));
	SHA1Final(digest, &ctx);
	CTASSERT(sizeof(seed->digest) == sizeof(digest));
	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
		printf("entropy: invalid seed checksum\n");
		seed->entropy = 0;
	}
	explicit_memset(&ctx, 0, sizeof ctx);
	explicit_memset(digest, 0, sizeof digest);

	/*
	 * If the entropy is insensibly large, try byte-swapping.
	 * Otherwise assume the file is corrupted and act as though it
	 * has zero entropy.
	 */
	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
		seed->entropy = bswap32(seed->entropy);
		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
			seed->entropy = 0;
	}

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Test and set E->seeded.  */
	seeded = E->seeded;
	E->seeded = (seed->entropy > 0);

	/*
	 * If we've been seeded, may be re-entering the same seed
	 * (e.g., bootloader vs module init, or something).  No harm in
	 * entering it twice, but it contributes no additional entropy.
	 */
	if (seeded) {
		printf("entropy: double-seeded by bootloader\n");
		seed->entropy = 0;
	} else {
		printf("entropy: entering seed from bootloader"
		    " with %u bits of entropy\n", (unsigned)seed->entropy);
	}

	/* Enter it into the pool and promptly zero it.  */
	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
	    seed->entropy);
	explicit_memset(seed, 0, sizeof(*seed));
}
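
/*
 * Example of the bootloader handoff (a sketch only; the rndsave_t
 * field layout is inferred from the checks above, and the numbers are
 * illustrative):
 *
 *	rndsave_t seed;
 *
 *	seed.entropy = 256;	(bits claimed for seed.data)
 *	... fill seed.data from the saved seed file ...
 *	... SHA-1 over (seed.entropy, seed.data) into seed.digest ...
 *	entropy_seed(&seed);	(overwrites seed on the way out)
 */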

/*
 * entropy_bootrequest()
 *
 *	Request entropy from all sources at boot, once config is
 *	complete and interrupts are running but we are still cold.
 */
void
entropy_bootrequest(void)
{
	int error;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * Request enough to satisfy the maximum entropy shortage.
	 * This is harmless overkill if the bootloader provided a seed.
	 */
	error = entropy_request(MINENTROPYBYTES, ENTROPY_WAIT);
	KASSERTMSG(error == 0, "error=%d", error);
}

/*
 * entropy_epoch()
 *
 *	Returns the current entropy epoch.  If this changes, you should
 *	reseed.  A value of -1 means the system has not yet reached
 *	full entropy or been explicitly consolidated; the epoch never
 *	reverts to -1 afterward.  It is never zero, so you can always
 *	use zero as an uninitialized sentinel value meaning `reseed
 *	ASAP'.
 *
 *	Usage model:
 *
 *		struct foo {
 *			struct crypto_prng prng;
 *			unsigned epoch;
 *		} *foo;
 *
 *		unsigned epoch = entropy_epoch();
 *		if (__predict_false(epoch != foo->epoch)) {
 *			uint8_t seed[32];
 *			if (entropy_extract(seed, sizeof seed, 0) != 0)
 *				warn("no entropy");
 *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
 *			foo->epoch = epoch;
 *		}
 */
unsigned
entropy_epoch(void)
{

	/*
	 * Unsigned int, so no need for seqlock for an atomic read, but
	 * make sure we read it afresh each time.
	 */
	return atomic_load_relaxed(&E->epoch);
}

/*
 * entropy_ready()
 *
 *	True if the entropy pool has full entropy.
 */
bool
entropy_ready(void)
{

	return atomic_load_relaxed(&E->bitsneeded) == 0;
}

/*
 * entropy_account_cpu(ec)
 *
 *	Consider whether to consolidate entropy into the global pool
 *	after we just added some into the current CPU's pending pool.
 *
 *	- If this CPU can provide enough entropy now, do so.
 *
 *	- If this and whatever else is available on other CPUs can
 *	  provide enough entropy, kick the consolidation thread.
 *
 *	- Otherwise, do as little as possible, except maybe consolidate
 *	  entropy at most once a minute.
 *
 *	Caller must be bound to a CPU and therefore have exclusive
 *	access to ec.  Will acquire and release the global lock.
 */
static void
entropy_account_cpu(struct entropy_cpu *ec)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec0;
	unsigned bitsdiff, samplesdiff;

	KASSERT(!cpu_intr_p());
	KASSERT(!cold);
	KASSERT(curlwp->l_pflag & LP_BOUND);

	/*
	 * If there's no entropy needed, and entropy has been
	 * consolidated in the last minute, do nothing.
	 */
	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&
	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
	    __predict_true((time_uptime - E->timestamp) <= 60))
		return;

	/*
	 * Consider consolidation, under the global lock and with the
	 * per-CPU state locked.
	 */
	mutex_enter(&E->lock);
	ec0 = entropy_cpu_get(&lock);
	KASSERT(ec0 == ec);

	if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
		/* Raced with consolidation xcall.  Nothing to do.  */
	} else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
		/*
		 * If we have not yet attained full entropy but we can
		 * now, do so.  This way we disseminate entropy
		 * promptly when it becomes available early at boot;
		 * otherwise we leave it to the entropy consolidation
		 * thread, which is rate-limited to mitigate side
		 * channels and abuse.
		 */
		uint8_t buf[ENTPOOL_CAPACITY];

		/* Transfer from the local pool to the global pool.  */
		entpool_extract(ec->ec_pool, buf, sizeof buf);
		entpool_enter(&E->pool, buf, sizeof buf);
		atomic_store_relaxed(&ec->ec_bitspending, 0);
		atomic_store_relaxed(&ec->ec_samplespending, 0);
		atomic_store_relaxed(&E->bitsneeded, 0);
		atomic_store_relaxed(&E->samplesneeded, 0);

		/* Notify waiters that we now have full entropy.  */
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	} else {
		/* Determine how much we can add to the global pool.  */
		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
		    "E->bitspending=%u", E->bitspending);
		bitsdiff = MIN(ec->ec_bitspending,
		    MINENTROPYBITS - E->bitspending);
		KASSERTMSG(E->samplespending <= MINSAMPLES,
		    "E->samplespending=%u", E->samplespending);
		samplesdiff = MIN(ec->ec_samplespending,
		    MINSAMPLES - E->samplespending);

		/*
		 * This should make a difference unless we are already
		 * saturated.
		 */
		KASSERTMSG((bitsdiff || samplesdiff ||
			E->bitspending == MINENTROPYBITS ||
			E->samplespending == MINSAMPLES),
		    "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
		    " samplesdiff=%u E->samplespending=%u"
		    " ec->ec_samplespending=%u"
		    " minentropybits=%u minsamples=%u",
		    bitsdiff, E->bitspending, ec->ec_bitspending,
		    samplesdiff, E->samplespending, ec->ec_samplespending,
		    (unsigned)MINENTROPYBITS, (unsigned)MINSAMPLES);

		/* Add to the global, subtract from the local.  */
		E->bitspending += bitsdiff;
		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
		    "E->bitspending=%u", E->bitspending);
		atomic_store_relaxed(&ec->ec_bitspending,
		    ec->ec_bitspending - bitsdiff);

		E->samplespending += samplesdiff;
		KASSERTMSG(E->samplespending <= MINSAMPLES,
		    "E->samplespending=%u", E->samplespending);
		atomic_store_relaxed(&ec->ec_samplespending,
		    ec->ec_samplespending - samplesdiff);

		/* One or the other must have gone up from zero.  */
		KASSERT(E->bitspending || E->samplespending);

		if (E->bitsneeded <= E->bitspending ||
		    E->samplesneeded <= E->samplespending) {
			/*
			 * Enough bits or at least samples between all
			 * the per-CPU pools.  Leave a note for the
			 * housekeeping thread to consolidate entropy
			 * next time it wakes up -- and wake it up if
			 * this is the first time, to speed things up.
			 *
			 * If we don't need any entropy, this doesn't
			 * mean much, but it is the only time we ever
			 * gather additional entropy in case the
			 * accounting has been overly optimistic.  This
			 * happens at most once a minute, so there's
			 * negligible performance cost.
			 */
			E->consolidate = true;
			if (E->epoch == (unsigned)-1)
				cv_broadcast(&E->cv);
			if (E->bitsneeded == 0)
				entropy_discretionary_evcnt.ev_count++;
		} else {
			/* Can't get full entropy.  Keep gathering.  */
			entropy_partial_evcnt.ev_count++;
		}
	}

	entropy_cpu_put(&lock, ec);
	mutex_exit(&E->lock);
}
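
/*
 * Worked example of the transfer arithmetic above (assuming
 * MINENTROPYBITS = 256): if E->bitspending = 224 globally and this
 * CPU has ec->ec_bitspending = 64, then bitsdiff =
 * MIN(64, 256 - 224) = 32, so the global pending count saturates at
 * 256 and the CPU keeps 32 bits pending for a later consolidation.
 */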

/*
 * entropy_enter_early(buf, len, nbits)
 *
 *	Do entropy bookkeeping globally, before we have established
 *	per-CPU pools.  Enter directly into the global pool in the hope
 *	that we enter enough before the first entropy_extract to thwart
 *	iterative-guessing attacks; entropy_extract will warn if not.
 */
static void
entropy_enter_early(const void *buf, size_t len, unsigned nbits)
{
	bool notify = false;
	int s;

	KASSERT(cold);

	/*
	 * We're early at boot before multithreading and multi-CPU
	 * operation, and we don't have softints yet to defer
	 * processing from interrupt context, so we have to enter the
	 * samples directly into the global pool.  But interrupts may
	 * be enabled, and we enter this path from interrupt context,
	 * so block interrupts until we're done.
	 */
	s = splhigh();

	/* Enter it into the pool.  */
	entpool_enter(&E->pool, buf, len);

	/*
	 * Decide whether to notify reseed -- we will do so if either:
	 * (a) we transition from partial entropy to full entropy, or
	 * (b) we get a batch of full entropy all at once.
	 * We don't count timing samples because we assume, while cold,
	 * there's not likely to be much jitter yet.
	 */
	notify |= (E->bitsneeded && E->bitsneeded <= nbits);
	notify |= (nbits >= MINENTROPYBITS);

	/*
	 * Subtract from the needed count and notify if appropriate.
	 * We don't count samples here because entropy_timer might
	 * still be returning zero at this point if there's no CPU
	 * cycle counter.
	 */
	E->bitsneeded -= MIN(E->bitsneeded, nbits);
	if (notify) {
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	}

	splx(s);
}

/*
 * entropy_enter(buf, len, nbits, count)
 *
 *	Enter len bytes of data from buf into the system's entropy
 *	pool, stirring as necessary when the internal buffer fills up.
 *	nbits is a lower bound on the number of bits of entropy in the
 *	process that led to this sample.
 */
static void
entropy_enter(const void *buf, size_t len, unsigned nbits, bool count)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
	unsigned bitspending, samplespending;
	int bound;

	KASSERTMSG(!cpu_intr_p(),
	    "use entropy_enter_intr from interrupt context");
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/*
	 * If we're still cold, just use entropy_enter_early to put
	 * samples directly into the global pool.
	 */
	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return;
	}

	/*
	 * Bind ourselves to the current CPU so we don't switch CPUs
	 * between entering data into the current CPU's pool (and
	 * updating the pending count) and transferring it to the
	 * global pool in entropy_account_cpu.
	 */
	bound = curlwp_bind();

	/*
	 * With the per-CPU state locked, enter into the per-CPU pool
	 * and count up what we can add.
	 *
	 * We don't count samples while cold because entropy_timer
	 * might still be returning zero if there's no CPU cycle
	 * counter.
	 */
	ec = entropy_cpu_get(&lock);
	entpool_enter(ec->ec_pool, buf, len);
	bitspending = ec->ec_bitspending;
	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
	samplespending = ec->ec_samplespending;
	if (__predict_true(count)) {
		samplespending += MIN(MINSAMPLES - samplespending, 1);
		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
	}
	entropy_cpu_put(&lock, ec);

	/* Consolidate globally if appropriate based on what we added.  */
	if (bitspending > 0 || samplespending >= MINSAMPLES)
		entropy_account_cpu(ec);

	curlwp_bindx(bound);
}
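
/*
 * Example call (a sketch; entropy_enter is static, so real callers
 * live later in this file, in the rnd_add_data path): enter a 16-byte
 * device sample, crediting 8 bits of entropy and counting it as one
 * sample toward MINSAMPLES.
 *
 *	uint8_t sample[16];
 *
 *	... fill sample from some device ...
 *	entropy_enter(sample, sizeof sample, 8, true);
 *	explicit_memset(sample, 0, sizeof sample);
 */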

/*
 * entropy_enter_intr(buf, len, nbits, count)
 *
 *	Enter up to len bytes of data from buf into the system's
 *	entropy pool without stirring.  nbits is a lower bound on the
 *	number of bits of entropy in the process that led to this
 *	sample.  If the sample could be entered completely, assume
 *	nbits of entropy pending; otherwise assume none, since we don't
 *	know whether some parts of the sample are constant, for
 *	instance.  Schedule a softint to stir the entropy pool if
 *	needed.  Return true if used fully, false if truncated at all.
 *
 *	Using this in thread or softint context with no spin locks held
 *	will work, but you might as well use entropy_enter in that
 *	case.
 */
static bool
entropy_enter_intr(const void *buf, size_t len, unsigned nbits, bool count)
{
	struct entropy_cpu *ec;
	bool fullyused = false;
	uint32_t bitspending, samplespending;
	int s;

	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/*
	 * If we're still cold, just use entropy_enter_early to put
	 * samples directly into the global pool.
	 */
	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return true;
	}

	/*
	 * In case we were called in thread or interrupt context with
	 * interrupts unblocked, block soft interrupts up to
	 * IPL_SOFTSERIAL.  This way logic that is safe in interrupt
	 * context or under a spin lock is also safe in less
	 * restrictive contexts.
	 */
	s = splsoftserial();

	/*
	 * Acquire the per-CPU state.  If someone is in the middle of
	 * using it, drop the sample.  Otherwise, take the lock so that
	 * higher-priority interrupts will drop their samples.
	 */
	ec = percpu_getref(entropy_percpu);
	if (ec->ec_locked) {
		ec->ec_evcnt->intrdrop.ev_count++;
		goto out0;
	}
	ec->ec_locked = true;
	__insn_barrier();

	/*
	 * Enter as much as we can into the per-CPU pool.  If it was
	 * truncated, schedule a softint to stir the pool and stop.
	 */
	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
		if (__predict_true(!cold))
			softint_schedule(entropy_sih);
		ec->ec_evcnt->intrtrunc.ev_count++;
		goto out1;
	}
	fullyused = true;

	/*
	 * Count up what we can contribute.
	 *
	 * We don't count samples while cold because entropy_timer
	 * might still be returning zero if there's no CPU cycle
	 * counter.
	 */
	bitspending = ec->ec_bitspending;
	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
	if (__predict_true(count)) {
		samplespending = ec->ec_samplespending;
		samplespending += MIN(MINSAMPLES - samplespending, 1);
		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
	}

	/* Schedule a softint if we added anything and it matters.  */
	if (__predict_false(atomic_load_relaxed(&E->bitsneeded) ||
		atomic_load_relaxed(&entropy_depletion)) &&
	    (nbits != 0 || count) &&
	    __predict_true(!cold))
		softint_schedule(entropy_sih);

out1:	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
out0:	percpu_putref(entropy_percpu);
	splx(s);

	return fullyused;
}
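
/*
 * Example interrupt-path call (a sketch; the real interrupt-path
 * caller is the rnd_add_data machinery declared above): enter a cycle
 * count with no entropy bits credited, counting it as one timing
 * sample, and learn whether it was taken or dropped.
 *
 *	uint32_t t = entropy_timer();
 *	bool fullyused = entropy_enter_intr(&t, sizeof t, 0, true);
 */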

/*
 * entropy_softintr(cookie)
 *
 *	Soft interrupt handler for entering entropy.  Takes care of
 *	stirring the local CPU's entropy pool if it filled up during
 *	hard interrupts, and promptly crediting entropy from the local
 *	CPU's entropy pool to the global entropy pool if needed.
 */
static void
entropy_softintr(void *cookie)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
	unsigned bitspending, samplespending;

	/*
	 * With the per-CPU state locked, stir the pool if necessary
	 * and determine if there's any pending entropy on this CPU to
	 * account globally.
	 */
	ec = entropy_cpu_get(&lock);
	ec->ec_evcnt->softint.ev_count++;
	entpool_stir(ec->ec_pool);
	bitspending = ec->ec_bitspending;
	samplespending = ec->ec_samplespending;
	entropy_cpu_put(&lock, ec);

	/* Consolidate globally if appropriate based on what we added.  */
	if (bitspending > 0 || samplespending >= MINSAMPLES)
		entropy_account_cpu(ec);
}

/*
 * entropy_thread(cookie)
 *
 *	Handle any asynchronous entropy housekeeping.
 */
static void
entropy_thread(void *cookie)
{
	bool consolidate;

#ifndef _RUMPKERNEL		/* XXX rump starts threads before cold */
	KASSERT(!cold);
#endif

	for (;;) {
		/*
		 * Wait until there's full entropy somewhere among the
		 * CPUs, as confirmed at most once per minute, or
		 * someone wants to consolidate.
		 */
		if (entropy_pending()) {
			consolidate = true;
		} else {
			mutex_enter(&E->lock);
			if (!E->consolidate)
				cv_timedwait(&E->cv, &E->lock, 60*hz);
			consolidate = E->consolidate;
			E->consolidate = false;
			mutex_exit(&E->lock);
		}

		if (consolidate) {
			/* Do it.  */
			entropy_do_consolidate();

			/* Mitigate abuse.  */
			kpause("entropy", false, hz, NULL);
		}
	}
}

struct entropy_pending_count {
	uint32_t bitspending;
	uint32_t samplespending;
};

/*
 * entropy_pending()
 *
 *	True if enough bits or samples are pending on other CPUs to
 *	warrant consolidation.
 */
static bool
entropy_pending(void)
{
	struct entropy_pending_count count = { 0, 0 }, *C = &count;

	percpu_foreach(entropy_percpu, &entropy_pending_cpu, C);
	return C->bitspending >= MINENTROPYBITS ||
	    C->samplespending >= MINSAMPLES;
}

static void
entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;
	struct entropy_pending_count *C = cookie;
	uint32_t cpu_bitspending;
	uint32_t cpu_samplespending;

	cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);
	cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);
	C->bitspending += MIN(MINENTROPYBITS - C->bitspending,
	    cpu_bitspending);
	C->samplespending += MIN(MINSAMPLES - C->samplespending,
	    cpu_samplespending);
}

/*
 * entropy_do_consolidate()
 *
 *	Issue a cross-call to gather entropy on all CPUs and advance
 *	the entropy epoch.
 */
static void
entropy_do_consolidate(void)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	struct entpool pool;
	uint8_t buf[ENTPOOL_CAPACITY];
	unsigned bitsdiff, samplesdiff;
	uint64_t ticket;

	KASSERT(!cold);
	ASSERT_SLEEPABLE();

	/* Gather entropy on all CPUs into a temporary pool.  */
	memset(&pool, 0, sizeof pool);
	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
	xc_wait(ticket);

	/* Acquire the lock to notify waiters.  */
	mutex_enter(&E->lock);

	/* Count another consolidation.  */
	entropy_consolidate_evcnt.ev_count++;

	/* Note when we last consolidated, i.e. now.  */
	E->timestamp = time_uptime;

	/* Mix what we gathered into the global pool.  */
	entpool_extract(&pool, buf, sizeof buf);
	entpool_enter(&E->pool, buf, sizeof buf);
	explicit_memset(&pool, 0, sizeof pool);

	/* Count the entropy that was gathered.  */
*/ 1225 1.62 riastrad bitsdiff = MIN(E->bitsneeded, E->bitspending); 1226 1.62 riastrad atomic_store_relaxed(&E->bitsneeded, E->bitsneeded - bitsdiff); 1227 1.62 riastrad E->bitspending -= bitsdiff; 1228 1.62 riastrad if (__predict_false(E->bitsneeded > 0) && bitsdiff != 0) { 1229 1.50 riastrad if ((boothowto & AB_DEBUG) != 0 && 1230 1.50 riastrad ratecheck(&lasttime, &interval)) { 1231 1.50 riastrad printf("WARNING:" 1232 1.1 riastrad " consolidating less than full entropy\n"); 1233 1.30 jmcneill } 1234 1.1 riastrad } 1235 1.1 riastrad 1236 1.62 riastrad samplesdiff = MIN(E->samplesneeded, E->samplespending); 1237 1.62 riastrad atomic_store_relaxed(&E->samplesneeded, 1238 1.62 riastrad E->samplesneeded - samplesdiff); 1239 1.62 riastrad E->samplespending -= samplesdiff; 1240 1.62 riastrad 1241 1.1 riastrad /* Advance the epoch and notify waiters. */ 1242 1.1 riastrad entropy_notify(); 1243 1.1 riastrad 1244 1.1 riastrad /* Release the lock. */ 1245 1.1 riastrad mutex_exit(&E->lock); 1246 1.1 riastrad } 1247 1.1 riastrad 1248 1.1 riastrad /* 1249 1.20 riastrad * entropy_consolidate_xc(vpool, arg2) 1250 1.1 riastrad * 1251 1.1 riastrad * Extract output from the local CPU's input pool and enter it 1252 1.20 riastrad * into a temporary pool passed as vpool. 1253 1.1 riastrad */ 1254 1.1 riastrad static void 1255 1.19 riastrad entropy_consolidate_xc(void *vpool, void *arg2 __unused) 1256 1.1 riastrad { 1257 1.19 riastrad struct entpool *pool = vpool; 1258 1.43 riastrad struct entropy_cpu_lock lock; 1259 1.1 riastrad struct entropy_cpu *ec; 1260 1.1 riastrad uint8_t buf[ENTPOOL_CAPACITY]; 1261 1.1 riastrad uint32_t extra[7]; 1262 1.1 riastrad unsigned i = 0; 1263 1.1 riastrad 1264 1.1 riastrad /* Grab CPU number and cycle counter to mix extra into the pool. */ 1265 1.1 riastrad extra[i++] = cpu_number(); 1266 1.1 riastrad extra[i++] = entropy_timer(); 1267 1.1 riastrad 1268 1.1 riastrad /* 1269 1.43 riastrad * With the per-CPU state locked, extract from the per-CPU pool 1270 1.43 riastrad * and count it as no longer pending. 1271 1.1 riastrad */ 1272 1.43 riastrad ec = entropy_cpu_get(&lock); 1273 1.1 riastrad extra[i++] = entropy_timer(); 1274 1.1 riastrad entpool_extract(ec->ec_pool, buf, sizeof buf); 1275 1.62 riastrad atomic_store_relaxed(&ec->ec_bitspending, 0); 1276 1.62 riastrad atomic_store_relaxed(&ec->ec_samplespending, 0); 1277 1.1 riastrad extra[i++] = entropy_timer(); 1278 1.43 riastrad entropy_cpu_put(&lock, ec); 1279 1.1 riastrad extra[i++] = entropy_timer(); 1280 1.1 riastrad 1281 1.1 riastrad /* 1282 1.1 riastrad * Copy over statistics, and enter the per-CPU extract and the 1283 1.19 riastrad * extra timing into the temporary pool, under the global lock. 1284 1.1 riastrad */ 1285 1.1 riastrad mutex_enter(&E->lock); 1286 1.1 riastrad extra[i++] = entropy_timer(); 1287 1.19 riastrad entpool_enter(pool, buf, sizeof buf); 1288 1.1 riastrad explicit_memset(buf, 0, sizeof buf); 1289 1.1 riastrad extra[i++] = entropy_timer(); 1290 1.1 riastrad KASSERT(i == __arraycount(extra)); 1291 1.19 riastrad entpool_enter(pool, extra, sizeof extra); 1292 1.1 riastrad explicit_memset(extra, 0, sizeof extra); 1293 1.1 riastrad mutex_exit(&E->lock); 1294 1.1 riastrad } 1295 1.1 riastrad 1296 1.1 riastrad /* 1297 1.1 riastrad * entropy_notify() 1298 1.1 riastrad * 1299 1.1 riastrad * Caller just contributed entropy to the global pool. Advance 1300 1.1 riastrad * the entropy epoch and notify waiters. 1301 1.1 riastrad * 1302 1.62 riastrad * Caller must hold the global entropy lock. 
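 *	While the system is still cold, the splhigh serialization of
 *	the global pool stands in for E->lock; the KASSERT below
 *	accepts either.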
1303 1.1 riastrad */ 1304 1.1 riastrad static void 1305 1.1 riastrad entropy_notify(void) 1306 1.1 riastrad { 1307 1.12 riastrad static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1308 1.12 riastrad static struct timeval lasttime; /* serialized by E->lock */ 1309 1.62 riastrad static bool ready = false, besteffort = false; 1310 1.1 riastrad unsigned epoch; 1311 1.1 riastrad 1312 1.63 riastrad KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 1313 1.1 riastrad 1314 1.1 riastrad /* 1315 1.1 riastrad * If this is the first time, print a message to the console 1316 1.1 riastrad * that we're ready so operators can compare it to the timing 1317 1.1 riastrad * of other events. 1318 1.62 riastrad * 1319 1.62 riastrad * If we didn't get full entropy from reliable sources, report 1320 1.62 riastrad * instead that we are running on fumes with best effort. (If 1321 1.62 riastrad * we ever do get full entropy after that, print the ready 1322 1.62 riastrad * message once.) 1323 1.62 riastrad */ 1324 1.62 riastrad if (__predict_false(!ready)) { 1325 1.62 riastrad if (E->bitsneeded == 0) { 1326 1.62 riastrad printf("entropy: ready\n"); 1327 1.62 riastrad ready = true; 1328 1.62 riastrad } else if (E->samplesneeded == 0 && !besteffort) { 1329 1.62 riastrad printf("entropy: best effort\n"); 1330 1.62 riastrad besteffort = true; 1331 1.62 riastrad } 1332 1.62 riastrad } 1333 1.1 riastrad 1334 1.1 riastrad /* Set the epoch; roll over from UINTMAX-1 to 1. */ 1335 1.12 riastrad if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) || 1336 1.12 riastrad ratecheck(&lasttime, &interval)) { 1337 1.12 riastrad epoch = E->epoch + 1; 1338 1.12 riastrad if (epoch == 0 || epoch == (unsigned)-1) 1339 1.12 riastrad epoch = 1; 1340 1.12 riastrad atomic_store_relaxed(&E->epoch, epoch); 1341 1.12 riastrad } 1342 1.41 riastrad KASSERT(E->epoch != (unsigned)-1); 1343 1.1 riastrad 1344 1.1 riastrad /* Notify waiters. */ 1345 1.63 riastrad if (__predict_true(!cold)) { 1346 1.1 riastrad cv_broadcast(&E->cv); 1347 1.1 riastrad selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT); 1348 1.1 riastrad } 1349 1.1 riastrad 1350 1.1 riastrad /* Count another notification. */ 1351 1.1 riastrad entropy_notify_evcnt.ev_count++; 1352 1.1 riastrad } 1353 1.1 riastrad 1354 1.1 riastrad /* 1355 1.13 riastrad * entropy_consolidate() 1356 1.13 riastrad * 1357 1.67 riastrad * Trigger entropy consolidation and wait for it to complete, or 1358 1.67 riastrad * return EINTR if interrupted by a signal. 1359 1.13 riastrad * 1360 1.13 riastrad * This should be used sparingly, not periodically -- requiring 1361 1.13 riastrad * conscious intervention by the operator or a clear policy 1362 1.13 riastrad * decision. Otherwise, the kernel will automatically consolidate 1363 1.13 riastrad * when enough entropy has been gathered into per-CPU pools to 1364 1.13 riastrad * transition to full entropy. 
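 *	A minimal usage sketch (mirroring sysctl_entropy_consolidate
 *	below, the canonical caller):
 *
 *		error = entropy_consolidate();
 *		if (error)	/* EINTR/ERESTART if signalled */
 *			return error;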
1365 1.13 riastrad */ 1366 1.67 riastrad int 1367 1.72 riastrad entropy_consolidate(void) 1368 1.13 riastrad { 1369 1.13 riastrad uint64_t ticket; 1370 1.13 riastrad int error; 1371 1.13 riastrad 1372 1.63 riastrad KASSERT(!cold); 1373 1.63 riastrad ASSERT_SLEEPABLE(); 1374 1.13 riastrad 1375 1.13 riastrad mutex_enter(&E->lock); 1376 1.13 riastrad ticket = entropy_consolidate_evcnt.ev_count; 1377 1.13 riastrad E->consolidate = true; 1378 1.13 riastrad cv_broadcast(&E->cv); 1379 1.13 riastrad while (ticket == entropy_consolidate_evcnt.ev_count) { 1380 1.13 riastrad error = cv_wait_sig(&E->cv, &E->lock); 1381 1.13 riastrad if (error) 1382 1.13 riastrad break; 1383 1.13 riastrad } 1384 1.13 riastrad mutex_exit(&E->lock); 1385 1.67 riastrad 1386 1.67 riastrad return error; 1387 1.13 riastrad } 1388 1.13 riastrad 1389 1.13 riastrad /* 1390 1.1 riastrad * sysctl -w kern.entropy.consolidate=1 1391 1.1 riastrad * 1392 1.1 riastrad * Trigger entropy consolidation and wait for it to complete. 1393 1.13 riastrad * Writable only by superuser. This, writing to /dev/random, and 1394 1.13 riastrad * ioctl(RNDADDDATA) are the only ways for the system to 1395 1.13 riastrad * consolidate entropy if the operator knows something the kernel 1396 1.13 riastrad * doesn't about how unpredictable the pending entropy pools are. 1397 1.1 riastrad */ 1398 1.1 riastrad static int 1399 1.1 riastrad sysctl_entropy_consolidate(SYSCTLFN_ARGS) 1400 1.1 riastrad { 1401 1.1 riastrad struct sysctlnode node = *rnode; 1402 1.57 riastrad int arg = 0; 1403 1.1 riastrad int error; 1404 1.1 riastrad 1405 1.1 riastrad node.sysctl_data = &arg; 1406 1.1 riastrad error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1407 1.1 riastrad if (error || newp == NULL) 1408 1.1 riastrad return error; 1409 1.13 riastrad if (arg) 1410 1.72 riastrad error = entropy_consolidate(); 1411 1.1 riastrad 1412 1.1 riastrad return error; 1413 1.1 riastrad } 1414 1.1 riastrad 1415 1.1 riastrad /* 1416 1.70 riastrad * entropy_gather() 1417 1.70 riastrad * 1418 1.70 riastrad * Trigger gathering entropy from all on-demand sources, and, if 1419 1.70 riastrad * requested, wait for synchronous sources (but not asynchronous 1420 1.70 riastrad * sources) to complete, or fail with EINTR if interrupted by a 1421 1.70 riastrad * signal. 1422 1.70 riastrad */ 1423 1.70 riastrad int 1424 1.70 riastrad entropy_gather(void) 1425 1.70 riastrad { 1426 1.70 riastrad int error; 1427 1.70 riastrad 1428 1.70 riastrad mutex_enter(&E->lock); 1429 1.70 riastrad error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT|ENTROPY_SIG); 1430 1.70 riastrad mutex_exit(&E->lock); 1431 1.70 riastrad 1432 1.70 riastrad return error; 1433 1.70 riastrad } 1434 1.70 riastrad 1435 1.70 riastrad /* 1436 1.10 riastrad * sysctl -w kern.entropy.gather=1 1437 1.10 riastrad * 1438 1.10 riastrad * Trigger gathering entropy from all on-demand sources, and wait 1439 1.10 riastrad * for synchronous sources (but not asynchronous sources) to 1440 1.10 riastrad * complete. Writable only by superuser. 
1441 1.10 riastrad */ 1442 1.10 riastrad static int 1443 1.10 riastrad sysctl_entropy_gather(SYSCTLFN_ARGS) 1444 1.10 riastrad { 1445 1.10 riastrad struct sysctlnode node = *rnode; 1446 1.57 riastrad int arg = 0; 1447 1.10 riastrad int error; 1448 1.10 riastrad 1449 1.10 riastrad node.sysctl_data = &arg; 1450 1.10 riastrad error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1451 1.10 riastrad if (error || newp == NULL) 1452 1.10 riastrad return error; 1453 1.70 riastrad if (arg) 1454 1.70 riastrad error = entropy_gather(); 1455 1.10 riastrad 1456 1.69 riastrad return error; 1457 1.10 riastrad } 1458 1.10 riastrad 1459 1.10 riastrad /* 1460 1.1 riastrad * entropy_extract(buf, len, flags) 1461 1.1 riastrad * 1462 1.1 riastrad * Extract len bytes from the global entropy pool into buf. 1463 1.1 riastrad * 1464 1.55 riastrad * Caller MUST NOT expose these bytes directly -- must use them 1465 1.55 riastrad * ONLY to seed a cryptographic pseudorandom number generator 1466 1.55 riastrad * (`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'), 1467 1.55 riastrad * and then erase them. entropy_extract does not, on its own, 1468 1.55 riastrad * provide backtracking resistance -- it must be combined with a 1469 1.55 riastrad * PRNG/DRBG that does. 1470 1.55 riastrad * 1471 1.63 riastrad * This may be used very early at boot, before even entropy_init 1472 1.63 riastrad * has been called. 1473 1.63 riastrad * 1474 1.55 riastrad * You generally shouldn't use this directly -- use cprng(9) 1475 1.55 riastrad * instead. 1476 1.55 riastrad * 1477 1.1 riastrad * Flags may have: 1478 1.1 riastrad * 1479 1.1 riastrad * ENTROPY_WAIT Wait for entropy if not available yet. 1480 1.1 riastrad * ENTROPY_SIG Allow interruption by a signal during wait. 1481 1.23 riastrad * ENTROPY_HARDFAIL Either fill the buffer with full entropy, 1482 1.23 riastrad * or fail without filling it at all. 1483 1.1 riastrad * 1484 1.1 riastrad * Return zero on success, or error on failure: 1485 1.1 riastrad * 1486 1.1 riastrad * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 1487 1.1 riastrad * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 1488 1.1 riastrad * 1489 1.1 riastrad * If ENTROPY_WAIT is set, allowed only in thread context. If 1490 1.65 riastrad * ENTROPY_WAIT is not set, allowed also in softint context -- may 1491 1.65 riastrad * sleep on an adaptive lock up to IPL_SOFTSERIAL. Forbidden in 1492 1.65 riastrad * hard interrupt context. 1493 1.1 riastrad */ 1494 1.1 riastrad int 1495 1.1 riastrad entropy_extract(void *buf, size_t len, int flags) 1496 1.1 riastrad { 1497 1.1 riastrad static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1498 1.1 riastrad static struct timeval lasttime; /* serialized by E->lock */ 1499 1.62 riastrad bool printed = false; 1500 1.63 riastrad int s = -1/*XXXGCC*/, error; 1501 1.1 riastrad 1502 1.1 riastrad if (ISSET(flags, ENTROPY_WAIT)) { 1503 1.1 riastrad ASSERT_SLEEPABLE(); 1504 1.63 riastrad KASSERT(!cold); 1505 1.1 riastrad } 1506 1.1 riastrad 1507 1.35 riastrad /* Refuse to operate in interrupt context. */ 1508 1.35 riastrad KASSERT(!cpu_intr_p()); 1509 1.35 riastrad 1510 1.63 riastrad /* 1511 1.63 riastrad * If we're cold, we are only contending with interrupts on the 1512 1.63 riastrad * current CPU, so block them. Otherwise, we are _not_ 1513 1.63 riastrad * contending with interrupts on the current CPU, but we are 1514 1.63 riastrad * contending with other threads, to exclude them with a mutex. 
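	 * The splhigh/splx pair below implements the cold case; the
	 * E->lock mutex, the warm case.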
1515 1.63 riastrad */ 1516 1.63 riastrad if (__predict_false(cold)) 1517 1.63 riastrad s = splhigh(); 1518 1.63 riastrad else 1519 1.1 riastrad mutex_enter(&E->lock); 1520 1.1 riastrad 1521 1.1 riastrad /* Wait until there is enough entropy in the system. */ 1522 1.1 riastrad error = 0; 1523 1.62 riastrad if (E->bitsneeded > 0 && E->samplesneeded == 0) { 1524 1.62 riastrad /* 1525 1.62 riastrad * We don't have full entropy from reliable sources, 1526 1.62 riastrad * but we gathered a plausible number of samples from 1527 1.62 riastrad * other sources such as timers. Try asking for more 1528 1.62 riastrad * from any sources we can, but don't worry if it 1529 1.62 riastrad * fails -- best effort. 1530 1.62 riastrad */ 1531 1.62 riastrad (void)entropy_request(ENTROPY_CAPACITY, flags); 1532 1.62 riastrad } else while (E->bitsneeded > 0 && E->samplesneeded > 0) { 1533 1.1 riastrad /* Ask for more, synchronously if possible. */ 1534 1.49 riastrad error = entropy_request(len, flags); 1535 1.49 riastrad if (error) 1536 1.49 riastrad break; 1537 1.1 riastrad 1538 1.1 riastrad /* If we got enough, we're done. */ 1539 1.62 riastrad if (E->bitsneeded == 0 || E->samplesneeded == 0) { 1540 1.1 riastrad KASSERT(error == 0); 1541 1.1 riastrad break; 1542 1.1 riastrad } 1543 1.1 riastrad 1544 1.1 riastrad /* If not waiting, stop here. */ 1545 1.1 riastrad if (!ISSET(flags, ENTROPY_WAIT)) { 1546 1.1 riastrad error = EWOULDBLOCK; 1547 1.1 riastrad break; 1548 1.1 riastrad } 1549 1.1 riastrad 1550 1.1 riastrad /* Wait for some entropy to come in and try again. */ 1551 1.63 riastrad KASSERT(!cold); 1552 1.62 riastrad if (!printed) { 1553 1.62 riastrad printf("entropy: pid %d (%s) waiting for entropy(7)\n", 1554 1.62 riastrad curproc->p_pid, curproc->p_comm); 1555 1.62 riastrad printed = true; 1556 1.62 riastrad } 1557 1.24 gson 1558 1.1 riastrad if (ISSET(flags, ENTROPY_SIG)) { 1559 1.62 riastrad error = cv_timedwait_sig(&E->cv, &E->lock, hz); 1560 1.62 riastrad if (error && error != EWOULDBLOCK) 1561 1.1 riastrad break; 1562 1.1 riastrad } else { 1563 1.62 riastrad cv_timedwait(&E->cv, &E->lock, hz); 1564 1.1 riastrad } 1565 1.1 riastrad } 1566 1.1 riastrad 1567 1.23 riastrad /* 1568 1.23 riastrad * Count failure -- but fill the buffer nevertheless, unless 1569 1.23 riastrad * the caller specified ENTROPY_HARDFAIL. 1570 1.23 riastrad */ 1571 1.23 riastrad if (error) { 1572 1.23 riastrad if (ISSET(flags, ENTROPY_HARDFAIL)) 1573 1.23 riastrad goto out; 1574 1.1 riastrad entropy_extract_fail_evcnt.ev_count++; 1575 1.23 riastrad } 1576 1.1 riastrad 1577 1.1 riastrad /* 1578 1.62 riastrad * Report a warning if we haven't yet reached full entropy. 1579 1.1 riastrad * This is the only case where we consider entropy to be 1580 1.1 riastrad * `depleted' without kern.entropy.depletion enabled -- when we 1581 1.1 riastrad * only have partial entropy, an adversary may be able to 1582 1.1 riastrad * narrow the state of the pool down to a small number of 1583 1.1 riastrad * possibilities; the output then enables them to confirm a 1584 1.1 riastrad * guess, reducing its entropy from the adversary's perspective 1585 1.1 riastrad * to zero. 1586 1.62 riastrad * 1587 1.62 riastrad * This should only happen if the operator has chosen to 1588 1.62 riastrad * consolidate, either through sysctl kern.entropy.consolidate 1589 1.62 riastrad * or by writing less than full entropy to /dev/random as root 1590 1.62 riastrad * (which /dev/random promises will immediately affect 1591 1.62 riastrad * subsequent output, for better or worse). 
1592 1.1 riastrad */ 1593 1.62 riastrad if (E->bitsneeded > 0 && E->samplesneeded > 0) { 1594 1.62 riastrad if (__predict_false(E->epoch == (unsigned)-1) && 1595 1.62 riastrad ratecheck(&lasttime, &interval)) { 1596 1.50 riastrad printf("WARNING:" 1597 1.50 riastrad " system needs entropy for security;" 1598 1.50 riastrad " see entropy(7)\n"); 1599 1.62 riastrad } 1600 1.62 riastrad atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS); 1601 1.62 riastrad atomic_store_relaxed(&E->samplesneeded, MINSAMPLES); 1602 1.1 riastrad } 1603 1.1 riastrad 1604 1.1 riastrad /* Extract data from the pool, and `deplete' if we're doing that. */ 1605 1.1 riastrad entpool_extract(&E->pool, buf, len); 1606 1.1 riastrad if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 1607 1.1 riastrad error == 0) { 1608 1.1 riastrad unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 1609 1.62 riastrad unsigned bitsneeded = E->bitsneeded; 1610 1.62 riastrad unsigned samplesneeded = E->samplesneeded; 1611 1.1 riastrad 1612 1.62 riastrad bitsneeded += MIN(MINENTROPYBITS - bitsneeded, cost); 1613 1.62 riastrad samplesneeded += MIN(MINSAMPLES - samplesneeded, cost); 1614 1.62 riastrad 1615 1.62 riastrad atomic_store_relaxed(&E->bitsneeded, bitsneeded); 1616 1.62 riastrad atomic_store_relaxed(&E->samplesneeded, samplesneeded); 1617 1.1 riastrad entropy_deplete_evcnt.ev_count++; 1618 1.1 riastrad } 1619 1.1 riastrad 1620 1.23 riastrad out: /* Release the global lock and return the error. */ 1621 1.63 riastrad if (__predict_false(cold)) 1622 1.63 riastrad splx(s); 1623 1.63 riastrad else 1624 1.1 riastrad mutex_exit(&E->lock); 1625 1.1 riastrad return error; 1626 1.1 riastrad } 1627 1.1 riastrad 1628 1.1 riastrad /* 1629 1.1 riastrad * entropy_poll(events) 1630 1.1 riastrad * 1631 1.1 riastrad * Return the subset of events ready, and if it is not all of 1632 1.1 riastrad * events, record curlwp as waiting for entropy. 1633 1.1 riastrad */ 1634 1.1 riastrad int 1635 1.1 riastrad entropy_poll(int events) 1636 1.1 riastrad { 1637 1.1 riastrad int revents = 0; 1638 1.1 riastrad 1639 1.63 riastrad KASSERT(!cold); 1640 1.1 riastrad 1641 1.1 riastrad /* Always ready for writing. */ 1642 1.1 riastrad revents |= events & (POLLOUT|POLLWRNORM); 1643 1.1 riastrad 1644 1.1 riastrad /* Narrow it down to reads. */ 1645 1.1 riastrad events &= POLLIN|POLLRDNORM; 1646 1.1 riastrad if (events == 0) 1647 1.1 riastrad return revents; 1648 1.1 riastrad 1649 1.1 riastrad /* 1650 1.1 riastrad * If we have reached full entropy and we're not depleting 1651 1.1 riastrad * entropy, we are forever ready. 1652 1.1 riastrad */ 1653 1.62 riastrad if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0 || 1654 1.62 riastrad atomic_load_relaxed(&E->samplesneeded) == 0) && 1655 1.1 riastrad __predict_true(!atomic_load_relaxed(&entropy_depletion))) 1656 1.1 riastrad return revents | events; 1657 1.1 riastrad 1658 1.1 riastrad /* 1659 1.1 riastrad * Otherwise, check whether we need entropy under the lock. If 1660 1.1 riastrad * we don't, we're ready; if we do, add ourselves to the queue. 
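	 * (The unlocked test above is only a fast path.  This locked
	 * re-check is what prevents a concurrent entropy_notify --
	 * which also takes E->lock before selnotify -- from slipping
	 * in between our test and selrecord and losing the wakeup.)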
1661 1.1 riastrad */ 1662 1.1 riastrad mutex_enter(&E->lock); 1663 1.62 riastrad if (E->bitsneeded == 0 || E->samplesneeded == 0) 1664 1.1 riastrad revents |= events; 1665 1.1 riastrad else 1666 1.1 riastrad selrecord(curlwp, &E->selq); 1667 1.1 riastrad mutex_exit(&E->lock); 1668 1.1 riastrad 1669 1.1 riastrad return revents; 1670 1.1 riastrad } 1671 1.1 riastrad 1672 1.1 riastrad /* 1673 1.1 riastrad * filt_entropy_read_detach(kn) 1674 1.1 riastrad * 1675 1.1 riastrad * struct filterops::f_detach callback for entropy read events: 1676 1.1 riastrad * remove kn from the list of waiters. 1677 1.1 riastrad */ 1678 1.1 riastrad static void 1679 1.1 riastrad filt_entropy_read_detach(struct knote *kn) 1680 1.1 riastrad { 1681 1.1 riastrad 1682 1.63 riastrad KASSERT(!cold); 1683 1.1 riastrad 1684 1.1 riastrad mutex_enter(&E->lock); 1685 1.25 thorpej selremove_knote(&E->selq, kn); 1686 1.1 riastrad mutex_exit(&E->lock); 1687 1.1 riastrad } 1688 1.1 riastrad 1689 1.1 riastrad /* 1690 1.1 riastrad * filt_entropy_read_event(kn, hint) 1691 1.1 riastrad * 1692 1.1 riastrad * struct filterops::f_event callback for entropy read events: 1693 1.1 riastrad * poll for entropy. Caller must hold the global entropy lock if 1694 1.1 riastrad * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 1695 1.1 riastrad */ 1696 1.1 riastrad static int 1697 1.1 riastrad filt_entropy_read_event(struct knote *kn, long hint) 1698 1.1 riastrad { 1699 1.1 riastrad int ret; 1700 1.1 riastrad 1701 1.63 riastrad KASSERT(!cold); 1702 1.1 riastrad 1703 1.1 riastrad /* Acquire the lock, if caller is outside entropy subsystem. */ 1704 1.1 riastrad if (hint == NOTE_SUBMIT) 1705 1.1 riastrad KASSERT(mutex_owned(&E->lock)); 1706 1.1 riastrad else 1707 1.1 riastrad mutex_enter(&E->lock); 1708 1.1 riastrad 1709 1.1 riastrad /* 1710 1.1 riastrad * If we still need entropy, can't read anything; if not, can 1711 1.1 riastrad * read arbitrarily much. 1712 1.1 riastrad */ 1713 1.62 riastrad if (E->bitsneeded != 0 && E->samplesneeded != 0) { 1714 1.1 riastrad ret = 0; 1715 1.1 riastrad } else { 1716 1.1 riastrad if (atomic_load_relaxed(&entropy_depletion)) 1717 1.58 riastrad kn->kn_data = ENTROPY_CAPACITY; /* bytes */ 1718 1.1 riastrad else 1719 1.1 riastrad kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 1720 1.1 riastrad ret = 1; 1721 1.1 riastrad } 1722 1.1 riastrad 1723 1.1 riastrad /* Release the lock, if caller is outside entropy subsystem. */ 1724 1.1 riastrad if (hint == NOTE_SUBMIT) 1725 1.1 riastrad KASSERT(mutex_owned(&E->lock)); 1726 1.1 riastrad else 1727 1.1 riastrad mutex_exit(&E->lock); 1728 1.1 riastrad 1729 1.1 riastrad return ret; 1730 1.1 riastrad } 1731 1.1 riastrad 1732 1.33 thorpej /* XXX Makes sense only for /dev/u?random. */ 1733 1.1 riastrad static const struct filterops entropy_read_filtops = { 1734 1.33 thorpej .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 1735 1.1 riastrad .f_attach = NULL, 1736 1.1 riastrad .f_detach = filt_entropy_read_detach, 1737 1.1 riastrad .f_event = filt_entropy_read_event, 1738 1.1 riastrad }; 1739 1.1 riastrad 1740 1.1 riastrad /* 1741 1.1 riastrad * entropy_kqfilter(kn) 1742 1.1 riastrad * 1743 1.1 riastrad * Register kn to receive entropy event notifications. May be 1744 1.1 riastrad * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. 
1745 1.1 riastrad */ 1746 1.1 riastrad int 1747 1.1 riastrad entropy_kqfilter(struct knote *kn) 1748 1.1 riastrad { 1749 1.1 riastrad 1750 1.63 riastrad KASSERT(!cold); 1751 1.1 riastrad 1752 1.1 riastrad switch (kn->kn_filter) { 1753 1.1 riastrad case EVFILT_READ: 1754 1.1 riastrad /* Enter into the global select queue. */ 1755 1.1 riastrad mutex_enter(&E->lock); 1756 1.1 riastrad kn->kn_fop = &entropy_read_filtops; 1757 1.25 thorpej selrecord_knote(&E->selq, kn); 1758 1.1 riastrad mutex_exit(&E->lock); 1759 1.1 riastrad return 0; 1760 1.1 riastrad case EVFILT_WRITE: 1761 1.1 riastrad /* Can always dump entropy into the system. */ 1762 1.1 riastrad kn->kn_fop = &seltrue_filtops; 1763 1.1 riastrad return 0; 1764 1.1 riastrad default: 1765 1.1 riastrad return EINVAL; 1766 1.1 riastrad } 1767 1.1 riastrad } 1768 1.1 riastrad 1769 1.1 riastrad /* 1770 1.1 riastrad * rndsource_setcb(rs, get, getarg) 1771 1.1 riastrad * 1772 1.1 riastrad * Set the request callback for the entropy source rs, if it can 1773 1.1 riastrad * provide entropy on demand. Must precede rnd_attach_source. 1774 1.1 riastrad */ 1775 1.1 riastrad void 1776 1.1 riastrad rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *), 1777 1.1 riastrad void *getarg) 1778 1.1 riastrad { 1779 1.1 riastrad 1780 1.1 riastrad rs->get = get; 1781 1.1 riastrad rs->getarg = getarg; 1782 1.1 riastrad } 1783 1.1 riastrad 1784 1.1 riastrad /* 1785 1.1 riastrad * rnd_attach_source(rs, name, type, flags) 1786 1.1 riastrad * 1787 1.1 riastrad * Attach the entropy source rs. Must be done after 1788 1.1 riastrad * rndsource_setcb, if any, and before any calls to rnd_add_data. 1789 1.1 riastrad */ 1790 1.1 riastrad void 1791 1.1 riastrad rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type, 1792 1.1 riastrad uint32_t flags) 1793 1.1 riastrad { 1794 1.1 riastrad uint32_t extra[4]; 1795 1.1 riastrad unsigned i = 0; 1796 1.1 riastrad 1797 1.59 riastrad KASSERTMSG(name[0] != '\0', "rndsource must have nonempty name"); 1798 1.59 riastrad 1799 1.1 riastrad /* Grab cycle counter to mix extra into the pool. */ 1800 1.1 riastrad extra[i++] = entropy_timer(); 1801 1.1 riastrad 1802 1.1 riastrad /* 1803 1.1 riastrad * Apply some standard flags: 1804 1.1 riastrad * 1805 1.1 riastrad * - We do not bother with network devices by default, for 1806 1.1 riastrad * hysterical raisins (perhaps: because it is often the case 1807 1.1 riastrad * that an adversary can influence network packet timings). 1808 1.1 riastrad */ 1809 1.1 riastrad switch (type) { 1810 1.1 riastrad case RND_TYPE_NET: 1811 1.1 riastrad flags |= RND_FLAG_NO_COLLECT; 1812 1.1 riastrad break; 1813 1.1 riastrad } 1814 1.1 riastrad 1815 1.1 riastrad /* Sanity-check the callback if RND_FLAG_HASCB is set. */ 1816 1.1 riastrad KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL); 1817 1.1 riastrad 1818 1.1 riastrad /* Initialize the random source. 
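	 * Note: rs->state is allocated only once entropy_percpu has
	 * been set up; a source attached before then has no per-CPU
	 * state yet, which is why the cold path in rnd_detach_source
	 * below has nothing to free.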
*/ 1819 1.1 riastrad memset(rs->name, 0, sizeof(rs->name)); /* paranoia */ 1820 1.1 riastrad strlcpy(rs->name, name, sizeof(rs->name)); 1821 1.28 riastrad memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 1822 1.28 riastrad memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 1823 1.9 riastrad rs->total = 0; 1824 1.1 riastrad rs->type = type; 1825 1.1 riastrad rs->flags = flags; 1826 1.63 riastrad if (entropy_percpu != NULL) 1827 1.1 riastrad rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 1828 1.1 riastrad extra[i++] = entropy_timer(); 1829 1.1 riastrad 1830 1.1 riastrad /* Wire it into the global list of random sources. */ 1831 1.63 riastrad if (__predict_true(!cold)) 1832 1.1 riastrad mutex_enter(&E->lock); 1833 1.1 riastrad LIST_INSERT_HEAD(&E->sources, rs, list); 1834 1.63 riastrad if (__predict_true(!cold)) 1835 1.1 riastrad mutex_exit(&E->lock); 1836 1.1 riastrad extra[i++] = entropy_timer(); 1837 1.1 riastrad 1838 1.1 riastrad /* Request that it provide entropy ASAP, if we can. */ 1839 1.1 riastrad if (ISSET(flags, RND_FLAG_HASCB)) 1840 1.1 riastrad (*rs->get)(ENTROPY_CAPACITY, rs->getarg); 1841 1.1 riastrad extra[i++] = entropy_timer(); 1842 1.1 riastrad 1843 1.1 riastrad /* Mix the extra into the pool. */ 1844 1.1 riastrad KASSERT(i == __arraycount(extra)); 1845 1.63 riastrad entropy_enter(extra, sizeof extra, 0, /*count*/__predict_true(!cold)); 1846 1.1 riastrad explicit_memset(extra, 0, sizeof extra); 1847 1.1 riastrad } 1848 1.1 riastrad 1849 1.1 riastrad /* 1850 1.1 riastrad * rnd_detach_source(rs) 1851 1.1 riastrad * 1852 1.1 riastrad * Detach the entropy source rs. May sleep waiting for users to 1853 1.1 riastrad * drain. Further use is not allowed. 1854 1.1 riastrad */ 1855 1.1 riastrad void 1856 1.1 riastrad rnd_detach_source(struct krndsource *rs) 1857 1.1 riastrad { 1858 1.1 riastrad 1859 1.1 riastrad /* 1860 1.1 riastrad * If we're cold (shouldn't happen, but hey), just remove it 1861 1.1 riastrad * from the list -- there's nothing allocated. 1862 1.1 riastrad */ 1863 1.63 riastrad if (__predict_false(cold) && entropy_percpu == NULL) { 1864 1.1 riastrad LIST_REMOVE(rs, list); 1865 1.1 riastrad return; 1866 1.1 riastrad } 1867 1.1 riastrad 1868 1.1 riastrad /* We may have to wait for entropy_request. */ 1869 1.1 riastrad ASSERT_SLEEPABLE(); 1870 1.1 riastrad 1871 1.4 riastrad /* Wait until the source list is not in use, and remove it. */ 1872 1.1 riastrad mutex_enter(&E->lock); 1873 1.4 riastrad while (E->sourcelock) 1874 1.27 riastrad cv_wait(&E->sourcelock_cv, &E->lock); 1875 1.1 riastrad LIST_REMOVE(rs, list); 1876 1.1 riastrad mutex_exit(&E->lock); 1877 1.1 riastrad 1878 1.1 riastrad /* Free the per-CPU data. */ 1879 1.1 riastrad percpu_free(rs->state, sizeof(struct rndsource_cpu)); 1880 1.1 riastrad } 1881 1.1 riastrad 1882 1.1 riastrad /* 1883 1.49 riastrad * rnd_lock_sources(flags) 1884 1.49 riastrad * 1885 1.49 riastrad * Lock the list of entropy sources. Caller must hold the global 1886 1.49 riastrad * entropy lock. If successful, no rndsource will go away until 1887 1.49 riastrad * rnd_unlock_sources even while the caller releases the global 1888 1.49 riastrad * entropy lock. 1889 1.4 riastrad * 1890 1.63 riastrad * May be called very early at boot, before entropy_init. 1891 1.63 riastrad * 1892 1.49 riastrad * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 1893 1.49 riastrad * If flags & ENTROPY_SIG, allow interruption by signal. 
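 *	A sketch of the intended call pattern (the same shape as the
 *	RNDGETSRCNUM ioctl below):
 *
 *		mutex_enter(&E->lock);
 *		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
 *		if (error) {
 *			mutex_exit(&E->lock);
 *			return error;
 *		}
 *		...walk E->sources, possibly dropping E->lock around
 *		   long-running work...
 *		rnd_unlock_sources();
 *		mutex_exit(&E->lock);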
1894 1.4 riastrad */ 1895 1.49 riastrad static int __attribute__((warn_unused_result)) 1896 1.49 riastrad rnd_lock_sources(int flags) 1897 1.4 riastrad { 1898 1.4 riastrad int error; 1899 1.4 riastrad 1900 1.63 riastrad KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 1901 1.63 riastrad KASSERT(!cpu_intr_p()); 1902 1.4 riastrad 1903 1.4 riastrad while (E->sourcelock) { 1904 1.63 riastrad KASSERT(!cold); 1905 1.49 riastrad if (!ISSET(flags, ENTROPY_WAIT)) 1906 1.49 riastrad return EWOULDBLOCK; 1907 1.49 riastrad if (ISSET(flags, ENTROPY_SIG)) { 1908 1.49 riastrad error = cv_wait_sig(&E->sourcelock_cv, &E->lock); 1909 1.49 riastrad if (error) 1910 1.49 riastrad return error; 1911 1.49 riastrad } else { 1912 1.49 riastrad cv_wait(&E->sourcelock_cv, &E->lock); 1913 1.49 riastrad } 1914 1.4 riastrad } 1915 1.4 riastrad 1916 1.4 riastrad E->sourcelock = curlwp; 1917 1.4 riastrad return 0; 1918 1.4 riastrad } 1919 1.4 riastrad 1920 1.4 riastrad /* 1921 1.4 riastrad * rnd_unlock_sources() 1922 1.4 riastrad * 1923 1.49 riastrad * Unlock the list of sources after rnd_lock_sources. Caller must 1924 1.49 riastrad * hold the global entropy lock. 1925 1.63 riastrad * 1926 1.63 riastrad * May be called very early at boot, before entropy_init. 1927 1.4 riastrad */ 1928 1.4 riastrad static void 1929 1.4 riastrad rnd_unlock_sources(void) 1930 1.4 riastrad { 1931 1.4 riastrad 1932 1.63 riastrad KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 1933 1.63 riastrad KASSERT(!cpu_intr_p()); 1934 1.4 riastrad 1935 1.16 riastrad KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p", 1936 1.16 riastrad curlwp, E->sourcelock); 1937 1.4 riastrad E->sourcelock = NULL; 1938 1.63 riastrad if (__predict_true(!cold)) 1939 1.27 riastrad cv_signal(&E->sourcelock_cv); 1940 1.4 riastrad } 1941 1.4 riastrad 1942 1.4 riastrad /* 1943 1.4 riastrad * rnd_sources_locked() 1944 1.4 riastrad * 1945 1.4 riastrad * True if we hold the list of rndsources locked, for diagnostic 1946 1.4 riastrad * assertions. 1947 1.63 riastrad * 1948 1.63 riastrad * May be called very early at boot, before entropy_init. 1949 1.4 riastrad */ 1950 1.7 riastrad static bool __diagused 1951 1.4 riastrad rnd_sources_locked(void) 1952 1.4 riastrad { 1953 1.4 riastrad 1954 1.16 riastrad return E->sourcelock == curlwp; 1955 1.4 riastrad } 1956 1.4 riastrad 1957 1.4 riastrad /* 1958 1.49 riastrad * entropy_request(nbytes, flags) 1959 1.1 riastrad * 1960 1.1 riastrad * Request nbytes bytes of entropy from all sources in the system. 1961 1.1 riastrad * OK if we overdo it. Caller must hold the global entropy lock; 1962 1.1 riastrad * will release and re-acquire it. 1963 1.49 riastrad * 1964 1.63 riastrad * May be called very early at boot, before entropy_init. 1965 1.63 riastrad * 1966 1.49 riastrad * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 1967 1.49 riastrad * If flags & ENTROPY_SIG, allow interruption by signal. 
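 *	Hedged sketch of the driver side this serializes; the names
 *	mydrv_get and sc are illustrative, not from this file:
 *
 *		rndsource_setcb(&sc->sc_rndsource, mydrv_get, sc);
 *		rnd_attach_source(&sc->sc_rndsource, "mydrv",
 *		    RND_TYPE_RNG, RND_FLAG_HASCB);
 *
 *	entropy_request then calls mydrv_get(nbytes, sc) with the
 *	source list locked, and the driver is expected to answer with
 *	rnd_add_data or rnd_add_data_sync.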
1968 1.1 riastrad */ 1969 1.49 riastrad static int 1970 1.49 riastrad entropy_request(size_t nbytes, int flags) 1971 1.1 riastrad { 1972 1.4 riastrad struct krndsource *rs; 1973 1.49 riastrad int error; 1974 1.1 riastrad 1975 1.63 riastrad KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 1976 1.63 riastrad KASSERT(!cpu_intr_p()); 1977 1.63 riastrad if ((flags & ENTROPY_WAIT) != 0 && __predict_false(!cold)) 1978 1.49 riastrad ASSERT_SLEEPABLE(); 1979 1.1 riastrad 1980 1.1 riastrad /* 1981 1.49 riastrad * Lock the list of entropy sources to block rnd_detach_source 1982 1.49 riastrad * until we're done, and to serialize calls to the entropy 1983 1.49 riastrad * callbacks as guaranteed to drivers. 1984 1.1 riastrad */ 1985 1.49 riastrad error = rnd_lock_sources(flags); 1986 1.49 riastrad if (error) 1987 1.49 riastrad return error; 1988 1.1 riastrad entropy_request_evcnt.ev_count++; 1989 1.1 riastrad 1990 1.1 riastrad /* Clamp to the maximum reasonable request. */ 1991 1.1 riastrad nbytes = MIN(nbytes, ENTROPY_CAPACITY); 1992 1.1 riastrad 1993 1.1 riastrad /* Walk the list of sources. */ 1994 1.4 riastrad LIST_FOREACH(rs, &E->sources, list) { 1995 1.1 riastrad /* Skip sources without callbacks. */ 1996 1.1 riastrad if (!ISSET(rs->flags, RND_FLAG_HASCB)) 1997 1.1 riastrad continue; 1998 1.1 riastrad 1999 1.22 riastrad /* 2000 1.22 riastrad * Skip sources that are disabled altogether -- we 2001 1.22 riastrad * would just ignore their samples anyway. 2002 1.22 riastrad */ 2003 1.22 riastrad if (ISSET(rs->flags, RND_FLAG_NO_COLLECT)) 2004 1.22 riastrad continue; 2005 1.22 riastrad 2006 1.1 riastrad /* Drop the lock while we call the callback. */ 2007 1.63 riastrad if (__predict_true(!cold)) 2008 1.1 riastrad mutex_exit(&E->lock); 2009 1.1 riastrad (*rs->get)(nbytes, rs->getarg); 2010 1.63 riastrad if (__predict_true(!cold)) 2011 1.1 riastrad mutex_enter(&E->lock); 2012 1.1 riastrad } 2013 1.1 riastrad 2014 1.49 riastrad /* Request done; unlock the list of entropy sources. */ 2015 1.4 riastrad rnd_unlock_sources(); 2016 1.49 riastrad return 0; 2017 1.1 riastrad } 2018 1.1 riastrad 2019 1.62 riastrad static inline uint32_t 2020 1.62 riastrad rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta) 2021 1.62 riastrad { 2022 1.62 riastrad int32_t delta2, delta3; 2023 1.62 riastrad 2024 1.62 riastrad /* 2025 1.62 riastrad * Calculate the second and third order differentials 2026 1.62 riastrad */ 2027 1.62 riastrad delta2 = d->dx - delta; 2028 1.62 riastrad if (delta2 < 0) 2029 1.62 riastrad delta2 = -delta2; /* XXX arithmetic overflow */ 2030 1.62 riastrad 2031 1.62 riastrad delta3 = d->d2x - delta2; 2032 1.62 riastrad if (delta3 < 0) 2033 1.62 riastrad delta3 = -delta3; /* XXX arithmetic overflow */ 2034 1.62 riastrad 2035 1.62 riastrad d->x = v; 2036 1.62 riastrad d->dx = delta; 2037 1.62 riastrad d->d2x = delta2; 2038 1.62 riastrad 2039 1.62 riastrad /* 2040 1.62 riastrad * If any delta is 0, we got no entropy. If all are non-zero, we 2041 1.62 riastrad * might have something. 
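	 * Illustrative numbers: with previous state dx=10, d2x=2, a
	 * new delta of 10 gives delta2 = |10-10| = 0 -- a perfectly
	 * periodic timer earns no credit -- while a new delta of 7
	 * gives delta2 = |10-7| = 3 and delta3 = |2-3| = 1, all
	 * nonzero, so that sample counts as 1.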
2042 1.62 riastrad */ 2043 1.62 riastrad if (delta == 0 || delta2 == 0 || delta3 == 0) 2044 1.62 riastrad return 0; 2045 1.62 riastrad 2046 1.62 riastrad return 1; 2047 1.62 riastrad } 2048 1.62 riastrad 2049 1.62 riastrad static inline uint32_t 2050 1.62 riastrad rnd_dt_estimate(struct krndsource *rs, uint32_t t) 2051 1.62 riastrad { 2052 1.62 riastrad int32_t delta; 2053 1.62 riastrad uint32_t ret; 2054 1.62 riastrad rnd_delta_t *d; 2055 1.62 riastrad struct rndsource_cpu *rc; 2056 1.62 riastrad 2057 1.62 riastrad rc = percpu_getref(rs->state); 2058 1.62 riastrad d = &rc->rc_timedelta; 2059 1.62 riastrad 2060 1.62 riastrad if (t < d->x) { 2061 1.62 riastrad delta = UINT32_MAX - d->x + t; 2062 1.62 riastrad } else { 2063 1.62 riastrad delta = d->x - t; 2064 1.62 riastrad } 2065 1.62 riastrad 2066 1.62 riastrad if (delta < 0) { 2067 1.62 riastrad delta = -delta; /* XXX arithmetic overflow */ 2068 1.62 riastrad } 2069 1.62 riastrad 2070 1.62 riastrad ret = rnd_delta_estimate(d, t, delta); 2071 1.62 riastrad 2072 1.62 riastrad KASSERT(d->x == t); 2073 1.62 riastrad KASSERT(d->dx == delta); 2074 1.62 riastrad percpu_putref(rs->state); 2075 1.62 riastrad return ret; 2076 1.62 riastrad } 2077 1.62 riastrad 2078 1.1 riastrad /* 2079 1.1 riastrad * rnd_add_uint32(rs, value) 2080 1.1 riastrad * 2081 1.1 riastrad * Enter 32 bits of data from an entropy source into the pool. 2082 1.1 riastrad * 2083 1.63 riastrad * May be called from any context or with spin locks held, but may 2084 1.63 riastrad * drop data. 2085 1.1 riastrad * 2086 1.63 riastrad * This is meant for cheaply taking samples from devices that 2087 1.63 riastrad * aren't designed to be hardware random number generators. 2088 1.1 riastrad */ 2089 1.1 riastrad void 2090 1.1 riastrad rnd_add_uint32(struct krndsource *rs, uint32_t value) 2091 1.1 riastrad { 2092 1.63 riastrad bool intr_p = true; 2093 1.1 riastrad 2094 1.63 riastrad rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 2095 1.1 riastrad } 2096 1.1 riastrad 2097 1.1 riastrad void 2098 1.1 riastrad _rnd_add_uint32(struct krndsource *rs, uint32_t value) 2099 1.1 riastrad { 2100 1.63 riastrad bool intr_p = true; 2101 1.1 riastrad 2102 1.63 riastrad rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 2103 1.1 riastrad } 2104 1.1 riastrad 2105 1.1 riastrad void 2106 1.1 riastrad _rnd_add_uint64(struct krndsource *rs, uint64_t value) 2107 1.1 riastrad { 2108 1.63 riastrad bool intr_p = true; 2109 1.1 riastrad 2110 1.63 riastrad rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 2111 1.1 riastrad } 2112 1.1 riastrad 2113 1.1 riastrad /* 2114 1.1 riastrad * rnd_add_data(rs, buf, len, entropybits) 2115 1.1 riastrad * 2116 1.1 riastrad * Enter data from an entropy source into the pool, with a 2117 1.1 riastrad * driver's estimate of how much entropy the physical source of 2118 1.1 riastrad * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 2119 1.1 riastrad * estimate and treat it as zero. 2120 1.1 riastrad * 2121 1.63 riastrad * rs MAY but SHOULD NOT be NULL. If rs is NULL, MUST NOT be 2122 1.63 riastrad * called from interrupt context or with spin locks held. 2123 1.1 riastrad * 2124 1.63 riastrad * If rs is non-NULL, MAY but SHOULD NOT be called from interrupt 2125 1.63 riastrad * context, in which case act like rnd_add_data_intr -- if the 2126 1.63 riastrad * sample buffer is full, schedule a softint and drop any 2127 1.63 riastrad * additional data on the floor. 
(This may change later once we 2128 1.63 riastrad * fix drivers that still call this from interrupt context to use 2129 1.63 riastrad * rnd_add_data_intr instead.) MUST NOT be called with spin locks 2130 1.63 riastrad * held if not in hard interrupt context -- i.e., MUST NOT be 2131 1.63 riastrad * called in thread context or softint context with spin locks 2132 1.63 riastrad * held. 2133 1.1 riastrad */ 2134 1.1 riastrad void 2135 1.1 riastrad rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len, 2136 1.1 riastrad uint32_t entropybits) 2137 1.1 riastrad { 2138 1.63 riastrad bool intr_p = cpu_intr_p(); /* XXX make this unconditionally false */ 2139 1.1 riastrad 2140 1.63 riastrad /* 2141 1.63 riastrad * Weird legacy exception that we should rip out and replace by 2142 1.63 riastrad * creating new rndsources to attribute entropy to the callers: 2143 1.63 riastrad * If there's no rndsource, just enter the data and time now. 2144 1.63 riastrad */ 2145 1.63 riastrad if (rs == NULL) { 2146 1.63 riastrad uint32_t extra; 2147 1.1 riastrad 2148 1.63 riastrad KASSERT(!intr_p); 2149 1.63 riastrad KASSERTMSG(howmany(entropybits, NBBY) <= len, 2150 1.63 riastrad "%s: impossible entropy rate:" 2151 1.63 riastrad " %"PRIu32" bits in %"PRIu32"-byte string", 2152 1.63 riastrad rs ? rs->name : "(anonymous)", entropybits, len); 2153 1.62 riastrad entropy_enter(buf, len, entropybits, /*count*/false); 2154 1.1 riastrad extra = entropy_timer(); 2155 1.62 riastrad entropy_enter(&extra, sizeof extra, 0, /*count*/false); 2156 1.1 riastrad explicit_memset(&extra, 0, sizeof extra); 2157 1.1 riastrad return; 2158 1.1 riastrad } 2159 1.1 riastrad 2160 1.63 riastrad rnd_add_data_internal(rs, buf, len, entropybits, intr_p); 2161 1.63 riastrad } 2162 1.63 riastrad 2163 1.63 riastrad /* 2164 1.63 riastrad * rnd_add_data_intr(rs, buf, len, entropybits) 2165 1.63 riastrad * 2166 1.63 riastrad * Try to enter data from an entropy source into the pool, with a 2167 1.63 riastrad * driver's estimate of how much entropy the physical source of 2168 1.63 riastrad * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 2169 1.63 riastrad * estimate and treat it as zero. If the sample buffer is full, 2170 1.63 riastrad * schedule a softint and drop any additional data on the floor. 2171 1.63 riastrad */ 2172 1.63 riastrad void 2173 1.63 riastrad rnd_add_data_intr(struct krndsource *rs, const void *buf, uint32_t len, 2174 1.63 riastrad uint32_t entropybits) 2175 1.63 riastrad { 2176 1.63 riastrad bool intr_p = true; 2177 1.63 riastrad 2178 1.63 riastrad rnd_add_data_internal(rs, buf, len, entropybits, intr_p); 2179 1.63 riastrad } 2180 1.63 riastrad 2181 1.63 riastrad /* 2182 1.63 riastrad * rnd_add_data_internal(rs, buf, len, entropybits, intr_p) 2183 1.63 riastrad * 2184 1.63 riastrad * Internal subroutine to decide whether or not to enter data or 2185 1.63 riastrad * timing for a particular rndsource, and if so, to enter it. 2186 1.63 riastrad * 2187 1.63 riastrad * intr_p is true for callers from interrupt context or spin locks 2188 1.63 riastrad * held, and false for callers from thread or soft interrupt 2189 1.63 riastrad * context and no spin locks held. 
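 *	(rnd_add_uint32, _rnd_add_uint32, _rnd_add_uint64, and
 *	rnd_add_data_intr all pass intr_p = true because they may be
 *	called from hard interrupt context; rnd_add_data passes
 *	cpu_intr_p().)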
2190 1.63 riastrad */ 2191 1.63 riastrad static void 2192 1.63 riastrad rnd_add_data_internal(struct krndsource *rs, const void *buf, uint32_t len, 2193 1.63 riastrad uint32_t entropybits, bool intr_p) 2194 1.63 riastrad { 2195 1.63 riastrad uint32_t flags; 2196 1.63 riastrad 2197 1.63 riastrad KASSERTMSG(howmany(entropybits, NBBY) <= len, 2198 1.63 riastrad "%s: impossible entropy rate:" 2199 1.63 riastrad " %"PRIu32" bits in %"PRIu32"-byte string", 2200 1.63 riastrad rs ? rs->name : "(anonymous)", entropybits, len); 2201 1.63 riastrad 2202 1.61 riastrad /* 2203 1.61 riastrad * Hold up the reset xcall before it zeroes the entropy counts 2204 1.61 riastrad * on this CPU or globally. Otherwise, we might leave some 2205 1.61 riastrad * nonzero entropy attributed to an untrusted source in the 2206 1.61 riastrad * event of a race with a change to flags. 2207 1.61 riastrad */ 2208 1.61 riastrad kpreempt_disable(); 2209 1.61 riastrad 2210 1.1 riastrad /* Load a snapshot of the flags. Ioctl may change them under us. */ 2211 1.1 riastrad flags = atomic_load_relaxed(&rs->flags); 2212 1.1 riastrad 2213 1.1 riastrad /* 2214 1.1 riastrad * Skip if: 2215 1.1 riastrad * - we're not collecting entropy, or 2216 1.1 riastrad * - the operator doesn't want to collect entropy from this, or 2217 1.1 riastrad * - neither data nor timings are being collected from this. 2218 1.1 riastrad */ 2219 1.1 riastrad if (!atomic_load_relaxed(&entropy_collection) || 2220 1.1 riastrad ISSET(flags, RND_FLAG_NO_COLLECT) || 2221 1.1 riastrad !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME)) 2222 1.61 riastrad goto out; 2223 1.1 riastrad 2224 1.1 riastrad /* If asked, ignore the estimate. */ 2225 1.1 riastrad if (ISSET(flags, RND_FLAG_NO_ESTIMATE)) 2226 1.1 riastrad entropybits = 0; 2227 1.1 riastrad 2228 1.1 riastrad /* If we are collecting data, enter them. */ 2229 1.62 riastrad if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) { 2230 1.62 riastrad rnd_add_data_1(rs, buf, len, entropybits, /*count*/false, 2231 1.63 riastrad RND_FLAG_COLLECT_VALUE, intr_p); 2232 1.62 riastrad } 2233 1.1 riastrad 2234 1.1 riastrad /* If we are collecting timings, enter one. */ 2235 1.1 riastrad if (ISSET(flags, RND_FLAG_COLLECT_TIME)) { 2236 1.63 riastrad uint32_t extra; 2237 1.62 riastrad bool count; 2238 1.62 riastrad 2239 1.62 riastrad /* Sample a timer. */ 2240 1.1 riastrad extra = entropy_timer(); 2241 1.62 riastrad 2242 1.62 riastrad /* If asked, do entropy estimation on the time. */ 2243 1.62 riastrad if ((flags & (RND_FLAG_ESTIMATE_TIME|RND_FLAG_NO_ESTIMATE)) == 2244 1.63 riastrad RND_FLAG_ESTIMATE_TIME && __predict_true(!cold)) 2245 1.62 riastrad count = rnd_dt_estimate(rs, extra); 2246 1.62 riastrad else 2247 1.62 riastrad count = false; 2248 1.62 riastrad 2249 1.62 riastrad rnd_add_data_1(rs, &extra, sizeof extra, 0, count, 2250 1.63 riastrad RND_FLAG_COLLECT_TIME, intr_p); 2251 1.1 riastrad } 2252 1.61 riastrad 2253 1.61 riastrad out: /* Allow concurrent changes to flags to finish. */ 2254 1.61 riastrad kpreempt_enable(); 2255 1.1 riastrad } 2256 1.1 riastrad 2257 1.28 riastrad static unsigned 2258 1.28 riastrad add_sat(unsigned a, unsigned b) 2259 1.28 riastrad { 2260 1.28 riastrad unsigned c = a + b; 2261 1.28 riastrad 2262 1.28 riastrad return (c < a ? 
UINT_MAX : c);
2263 1.28 riastrad }
2264 1.28 riastrad
2265 1.1 riastrad /*
2266 1.62 riastrad * rnd_add_data_1(rs, buf, len, entropybits, count, flag, intr_p)
2267 1.1 riastrad *
2268 1.1 riastrad * Internal subroutine to call either entropy_enter_intr, if intr_p
2269 1.1 riastrad * is true, or entropy_enter if not, and to count the entropy in an
2270 1.1 riastrad * rndsource.
2271 1.1 riastrad */
2272 1.1 riastrad static void
2273 1.1 riastrad rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
2274 1.63 riastrad uint32_t entropybits, bool count, uint32_t flag, bool intr_p)
2275 1.1 riastrad {
2276 1.1 riastrad bool fullyused;
2277 1.1 riastrad
2278 1.1 riastrad /*
2279 1.63 riastrad * For the interrupt-like path, use entropy_enter_intr and take
2280 1.63 riastrad * note of whether it consumed the full sample; otherwise, use
2281 1.63 riastrad * entropy_enter, which always consumes the full sample.
2282 1.1 riastrad */
2283 1.63 riastrad if (intr_p) {
2284 1.62 riastrad fullyused = entropy_enter_intr(buf, len, entropybits, count);
2285 1.1 riastrad } else {
2286 1.62 riastrad entropy_enter(buf, len, entropybits, count);
2287 1.1 riastrad fullyused = true;
2288 1.1 riastrad }
2289 1.1 riastrad
2290 1.1 riastrad /*
2291 1.1 riastrad * If we used the full sample, note how many bits were
2292 1.1 riastrad * contributed from this source.
2293 1.1 riastrad */
2294 1.1 riastrad if (fullyused) {
2295 1.63 riastrad if (__predict_false(cold)) {
2296 1.63 riastrad const int s = splhigh();
2297 1.28 riastrad rs->total = add_sat(rs->total, entropybits);
2298 1.28 riastrad switch (flag) {
2299 1.28 riastrad case RND_FLAG_COLLECT_TIME:
2300 1.28 riastrad rs->time_delta.insamples =
2301 1.28 riastrad add_sat(rs->time_delta.insamples, 1);
2302 1.28 riastrad break;
2303 1.28 riastrad case RND_FLAG_COLLECT_VALUE:
2304 1.28 riastrad rs->value_delta.insamples =
2305 1.28 riastrad add_sat(rs->value_delta.insamples, 1);
2306 1.28 riastrad break;
2307 1.28 riastrad }
2308 1.63 riastrad splx(s);
2309 1.1 riastrad } else {
2310 1.1 riastrad struct rndsource_cpu *rc = percpu_getref(rs->state);
2311 1.1 riastrad
2312 1.28 riastrad atomic_store_relaxed(&rc->rc_entropybits,
2313 1.28 riastrad add_sat(rc->rc_entropybits, entropybits));
2314 1.28 riastrad switch (flag) {
2315 1.28 riastrad case RND_FLAG_COLLECT_TIME:
2316 1.28 riastrad atomic_store_relaxed(&rc->rc_timesamples,
2317 1.28 riastrad add_sat(rc->rc_timesamples, 1));
2318 1.28 riastrad break;
2319 1.28 riastrad case RND_FLAG_COLLECT_VALUE:
2320 1.28 riastrad atomic_store_relaxed(&rc->rc_datasamples,
2321 1.28 riastrad add_sat(rc->rc_datasamples, 1));
2322 1.28 riastrad break;
2323 1.28 riastrad }
2324 1.1 riastrad percpu_putref(rs->state);
2325 1.1 riastrad }
2326 1.1 riastrad }
2327 1.1 riastrad }
2328 1.1 riastrad
2329 1.1 riastrad /*
2330 1.1 riastrad * rnd_add_data_sync(rs, buf, len, entropybits)
2331 1.1 riastrad *
2332 1.1 riastrad * Same as rnd_add_data. Originally used in rndsource callbacks,
2333 1.1 riastrad * to break an unnecessary cycle; no longer really needed.
2334 1.1 riastrad */ 2335 1.1 riastrad void 2336 1.1 riastrad rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len, 2337 1.1 riastrad uint32_t entropybits) 2338 1.1 riastrad { 2339 1.1 riastrad 2340 1.1 riastrad rnd_add_data(rs, buf, len, entropybits); 2341 1.1 riastrad } 2342 1.1 riastrad 2343 1.1 riastrad /* 2344 1.1 riastrad * rndsource_entropybits(rs) 2345 1.1 riastrad * 2346 1.1 riastrad * Return approximately the number of bits of entropy that have 2347 1.1 riastrad * been contributed via rs so far. Approximate if other CPUs may 2348 1.1 riastrad * be calling rnd_add_data concurrently. 2349 1.1 riastrad */ 2350 1.1 riastrad static unsigned 2351 1.1 riastrad rndsource_entropybits(struct krndsource *rs) 2352 1.1 riastrad { 2353 1.1 riastrad unsigned nbits = rs->total; 2354 1.1 riastrad 2355 1.63 riastrad KASSERT(!cold); 2356 1.4 riastrad KASSERT(rnd_sources_locked()); 2357 1.1 riastrad percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits); 2358 1.1 riastrad return nbits; 2359 1.1 riastrad } 2360 1.1 riastrad 2361 1.1 riastrad static void 2362 1.1 riastrad rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci) 2363 1.1 riastrad { 2364 1.1 riastrad struct rndsource_cpu *rc = ptr; 2365 1.1 riastrad unsigned *nbitsp = cookie; 2366 1.1 riastrad unsigned cpu_nbits; 2367 1.1 riastrad 2368 1.28 riastrad cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits); 2369 1.1 riastrad *nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits); 2370 1.1 riastrad } 2371 1.1 riastrad 2372 1.1 riastrad /* 2373 1.1 riastrad * rndsource_to_user(rs, urs) 2374 1.1 riastrad * 2375 1.1 riastrad * Copy a description of rs out to urs for userland. 2376 1.1 riastrad */ 2377 1.1 riastrad static void 2378 1.1 riastrad rndsource_to_user(struct krndsource *rs, rndsource_t *urs) 2379 1.1 riastrad { 2380 1.1 riastrad 2381 1.63 riastrad KASSERT(!cold); 2382 1.4 riastrad KASSERT(rnd_sources_locked()); 2383 1.1 riastrad 2384 1.1 riastrad /* Avoid kernel memory disclosure. */ 2385 1.1 riastrad memset(urs, 0, sizeof(*urs)); 2386 1.1 riastrad 2387 1.1 riastrad CTASSERT(sizeof(urs->name) == sizeof(rs->name)); 2388 1.1 riastrad strlcpy(urs->name, rs->name, sizeof(urs->name)); 2389 1.1 riastrad urs->total = rndsource_entropybits(rs); 2390 1.1 riastrad urs->type = rs->type; 2391 1.1 riastrad urs->flags = atomic_load_relaxed(&rs->flags); 2392 1.1 riastrad } 2393 1.1 riastrad 2394 1.1 riastrad /* 2395 1.1 riastrad * rndsource_to_user_est(rs, urse) 2396 1.1 riastrad * 2397 1.1 riastrad * Copy a description of rs and estimation statistics out to urse 2398 1.1 riastrad * for userland. 2399 1.1 riastrad */ 2400 1.1 riastrad static void 2401 1.1 riastrad rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse) 2402 1.1 riastrad { 2403 1.1 riastrad 2404 1.63 riastrad KASSERT(!cold); 2405 1.4 riastrad KASSERT(rnd_sources_locked()); 2406 1.1 riastrad 2407 1.1 riastrad /* Avoid kernel memory disclosure. */ 2408 1.1 riastrad memset(urse, 0, sizeof(*urse)); 2409 1.1 riastrad 2410 1.1 riastrad /* Copy out the rndsource description. */ 2411 1.1 riastrad rndsource_to_user(rs, &urse->rt); 2412 1.1 riastrad 2413 1.28 riastrad /* Gather the statistics. 
*/ 2414 1.28 riastrad urse->dt_samples = rs->time_delta.insamples; 2415 1.1 riastrad urse->dt_total = 0; 2416 1.28 riastrad urse->dv_samples = rs->value_delta.insamples; 2417 1.28 riastrad urse->dv_total = urse->rt.total; 2418 1.28 riastrad percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse); 2419 1.28 riastrad } 2420 1.28 riastrad 2421 1.28 riastrad static void 2422 1.28 riastrad rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci) 2423 1.28 riastrad { 2424 1.28 riastrad struct rndsource_cpu *rc = ptr; 2425 1.28 riastrad rndsource_est_t *urse = cookie; 2426 1.28 riastrad 2427 1.28 riastrad urse->dt_samples = add_sat(urse->dt_samples, 2428 1.28 riastrad atomic_load_relaxed(&rc->rc_timesamples)); 2429 1.28 riastrad urse->dv_samples = add_sat(urse->dv_samples, 2430 1.28 riastrad atomic_load_relaxed(&rc->rc_datasamples)); 2431 1.1 riastrad } 2432 1.1 riastrad 2433 1.1 riastrad /* 2434 1.21 riastrad * entropy_reset_xc(arg1, arg2) 2435 1.21 riastrad * 2436 1.21 riastrad * Reset the current CPU's pending entropy to zero. 2437 1.21 riastrad */ 2438 1.21 riastrad static void 2439 1.21 riastrad entropy_reset_xc(void *arg1 __unused, void *arg2 __unused) 2440 1.21 riastrad { 2441 1.21 riastrad uint32_t extra = entropy_timer(); 2442 1.43 riastrad struct entropy_cpu_lock lock; 2443 1.21 riastrad struct entropy_cpu *ec; 2444 1.21 riastrad 2445 1.21 riastrad /* 2446 1.43 riastrad * With the per-CPU state locked, zero the pending count and 2447 1.43 riastrad * enter a cycle count for fun. 2448 1.21 riastrad */ 2449 1.43 riastrad ec = entropy_cpu_get(&lock); 2450 1.62 riastrad ec->ec_bitspending = 0; 2451 1.62 riastrad ec->ec_samplespending = 0; 2452 1.21 riastrad entpool_enter(ec->ec_pool, &extra, sizeof extra); 2453 1.43 riastrad entropy_cpu_put(&lock, ec); 2454 1.21 riastrad } 2455 1.21 riastrad 2456 1.21 riastrad /* 2457 1.70 riastrad * entropy_reset() 2458 1.70 riastrad * 2459 1.70 riastrad * Assume the entropy pool has been exposed, e.g. because the VM 2460 1.70 riastrad * has been cloned. Nix all the pending entropy and set the 2461 1.70 riastrad * needed to maximum. 2462 1.70 riastrad */ 2463 1.70 riastrad void 2464 1.70 riastrad entropy_reset(void) 2465 1.70 riastrad { 2466 1.70 riastrad 2467 1.70 riastrad xc_broadcast(0, &entropy_reset_xc, NULL, NULL); 2468 1.70 riastrad mutex_enter(&E->lock); 2469 1.70 riastrad E->bitspending = 0; 2470 1.70 riastrad E->samplespending = 0; 2471 1.70 riastrad atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS); 2472 1.70 riastrad atomic_store_relaxed(&E->samplesneeded, MINSAMPLES); 2473 1.70 riastrad E->consolidate = false; 2474 1.70 riastrad mutex_exit(&E->lock); 2475 1.70 riastrad } 2476 1.70 riastrad 2477 1.70 riastrad /* 2478 1.1 riastrad * entropy_ioctl(cmd, data) 2479 1.1 riastrad * 2480 1.1 riastrad * Handle various /dev/random ioctl queries. 2481 1.1 riastrad */ 2482 1.1 riastrad int 2483 1.1 riastrad entropy_ioctl(unsigned long cmd, void *data) 2484 1.1 riastrad { 2485 1.1 riastrad struct krndsource *rs; 2486 1.1 riastrad bool privileged; 2487 1.1 riastrad int error; 2488 1.1 riastrad 2489 1.63 riastrad KASSERT(!cold); 2490 1.1 riastrad 2491 1.1 riastrad /* Verify user's authorization to perform the ioctl. 
*/ 2492 1.1 riastrad switch (cmd) { 2493 1.1 riastrad case RNDGETENTCNT: 2494 1.1 riastrad case RNDGETPOOLSTAT: 2495 1.1 riastrad case RNDGETSRCNUM: 2496 1.1 riastrad case RNDGETSRCNAME: 2497 1.1 riastrad case RNDGETESTNUM: 2498 1.1 riastrad case RNDGETESTNAME: 2499 1.31 christos error = kauth_authorize_device(kauth_cred_get(), 2500 1.1 riastrad KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); 2501 1.1 riastrad break; 2502 1.1 riastrad case RNDCTL: 2503 1.31 christos error = kauth_authorize_device(kauth_cred_get(), 2504 1.1 riastrad KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); 2505 1.1 riastrad break; 2506 1.1 riastrad case RNDADDDATA: 2507 1.31 christos error = kauth_authorize_device(kauth_cred_get(), 2508 1.1 riastrad KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); 2509 1.1 riastrad /* Ascertain whether the user's inputs should be counted. */ 2510 1.31 christos if (kauth_authorize_device(kauth_cred_get(), 2511 1.1 riastrad KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, 2512 1.1 riastrad NULL, NULL, NULL, NULL) == 0) 2513 1.1 riastrad privileged = true; 2514 1.1 riastrad break; 2515 1.1 riastrad default: { 2516 1.1 riastrad /* 2517 1.1 riastrad * XXX Hack to avoid changing module ABI so this can be 2518 1.1 riastrad * pulled up. Later, we can just remove the argument. 2519 1.1 riastrad */ 2520 1.1 riastrad static const struct fileops fops = { 2521 1.1 riastrad .fo_ioctl = rnd_system_ioctl, 2522 1.1 riastrad }; 2523 1.1 riastrad struct file f = { 2524 1.1 riastrad .f_ops = &fops, 2525 1.1 riastrad }; 2526 1.1 riastrad MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data), 2527 1.1 riastrad enosys(), error); 2528 1.1 riastrad #if defined(_LP64) 2529 1.1 riastrad if (error == ENOSYS) 2530 1.1 riastrad MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data), 2531 1.1 riastrad enosys(), error); 2532 1.1 riastrad #endif 2533 1.1 riastrad if (error == ENOSYS) 2534 1.1 riastrad error = ENOTTY; 2535 1.1 riastrad break; 2536 1.1 riastrad } 2537 1.1 riastrad } 2538 1.1 riastrad 2539 1.1 riastrad /* If anything went wrong with authorization, stop here. */ 2540 1.1 riastrad if (error) 2541 1.1 riastrad return error; 2542 1.1 riastrad 2543 1.1 riastrad /* Dispatch on the command. */ 2544 1.1 riastrad switch (cmd) { 2545 1.1 riastrad case RNDGETENTCNT: { /* Get current entropy count in bits. */ 2546 1.1 riastrad uint32_t *countp = data; 2547 1.1 riastrad 2548 1.1 riastrad mutex_enter(&E->lock); 2549 1.62 riastrad *countp = MINENTROPYBITS - E->bitsneeded; 2550 1.1 riastrad mutex_exit(&E->lock); 2551 1.1 riastrad 2552 1.1 riastrad break; 2553 1.1 riastrad } 2554 1.1 riastrad case RNDGETPOOLSTAT: { /* Get entropy pool statistics. 
	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics. */
		rndpoolstat_t *pstat = data;

		mutex_enter(&E->lock);

		/* parameters */
		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
		pstat->threshold = MINENTROPYBITS/NBBY; /* bytes */
		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */

		/* state */
		pstat->added = 0; /* XXX total entropy_enter count */
		pstat->curentropy = MINENTROPYBITS - E->bitsneeded; /* bits */
		pstat->removed = 0; /* XXX total entropy_extract count */
		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */

		/*
		 * This used to be bits of data fabricated in some
		 * sense; we'll take it to mean number of samples,
		 * excluding the bits of entropy from HWRNG or seed.
		 */
		pstat->generated = MINSAMPLES - E->samplesneeded;
		pstat->generated -= MIN(pstat->generated, pstat->curentropy);

		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNUM: {	/* Get entropy sources by number. */
		rndstat_t *stat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested. */
		if (stat->count == 0)
			break;
		if (stat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == stat->start)
				break;
		}
		while (i < stat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &stat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= stat->count);
		stat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
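	/*
	 * A sketch of how userland might walk every source with the
	 * RNDGETSRCNUM pagination above, RND_MAXSTATCOUNT entries per
	 * call -- roughly what rndctl(8)-style tools do.  Assumes fd is
	 * open on /dev/urandom as in the earlier sketch, with error
	 * handling elided:
	 *
	 *	rndstat_t stat;
	 *	uint32_t start = 0;
	 *
	 *	do {
	 *		stat.start = start;
	 *		stat.count = RND_MAXSTATCOUNT;
	 *		if (ioctl(fd, RNDGETSRCNUM, &stat) == -1)
	 *			break;
	 *		for (uint32_t i = 0; i < stat.count; i++)
	 *			printf("%s\n", stat.source[i].name);
	 *		start += stat.count;
	 *	} while (stat.count == RND_MAXSTATCOUNT);
	 */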
	case RNDGETESTNUM: {	/* Get sources and estimates by number. */
		rndstat_est_t *estat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested. */
		if (estat->count == 0)
			break;
		if (estat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == estat->start)
				break;
		}
		while (i < estat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &estat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= estat->count);
		estat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNAME: {	/* Get entropy sources by name. */
		rndstat_name_t *nstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, nstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &nstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNAME: {	/* Get sources and estimates by name. */
		rndstat_est_name_t *enstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, enstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &enstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
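	/*
	 * Looking one source up by name via RNDGETSRCNAME above is
	 * simpler; a userland sketch, with "cpurng" standing in for
	 * whatever source name actually exists (ENOENT otherwise), and
	 * <string.h> assumed for memset/strlcpy:
	 *
	 *	rndstat_name_t nstat;
	 *
	 *	memset(&nstat, 0, sizeof nstat);
	 *	strlcpy(nstat.name, "cpurng", sizeof nstat.name);
	 *	if (ioctl(fd, RNDGETSRCNAME, &nstat) == 0)
	 *		printf("%s: %u bits\n", nstat.source.name,
	 *		    (unsigned)nstat.source.total);
	 */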
	case RNDCTL: {		/* Modify entropy source flags. */
		rndctl_t *rndctl = data;
		const size_t n = sizeof(rs->name);
		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
		uint32_t flags;
		bool reset = false, request = false;

		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));

		/* Whitelist the flags that the user can change. */
		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;

		/*
		 * For each matching rndsource, either by type if
		 * specified or by name if not, set the masked flags.
		 */
		mutex_enter(&E->lock);
		LIST_FOREACH(rs, &E->sources, list) {
			if (rndctl->type != 0xff) {
				if (rs->type != rndctl->type)
					continue;
			} else if (rndctl->name[0] != '\0') {
				if (strncmp(rs->name, rndctl->name, n) != 0)
					continue;
			}
			flags = rs->flags & ~rndctl->mask;
			flags |= rndctl->flags & rndctl->mask;
			if ((rs->flags & resetflags) == 0 &&
			    (flags & resetflags) != 0)
				reset = true;
			if ((rs->flags ^ flags) & resetflags)
				request = true;
			atomic_store_relaxed(&rs->flags, flags);
		}
		mutex_exit(&E->lock);

		/*
		 * If we disabled estimation or collection, nix all the
		 * pending entropy and set the amount needed back to the
		 * maximum.
		 */
		if (reset)
			entropy_reset();

		/*
		 * If we changed any of the estimation or collection
		 * flags, request new samples from everyone -- either
		 * to make up for what we just lost, or to get new
		 * samples from what we just added.
		 *
		 * Failing on signal, while waiting for another process
		 * to finish requesting entropy, is OK here even though
		 * we have committed side effects, because this ioctl
		 * command is idempotent, so repeating it is safe.
		 */
		if (request)
			error = entropy_gather();
		break;
	}
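	/*
	 * A userland sketch of driving the RNDCTL path above -- the
	 * moral equivalent of rndctl(8) turning off collection for one
	 * device.  The source name is hypothetical; type 0xff means
	 * "match by name rather than type", per the dispatch above:
	 *
	 *	rndctl_t ctl;
	 *
	 *	memset(&ctl, 0, sizeof ctl);
	 *	strlcpy(ctl.name, "example0", sizeof ctl.name);
	 *	ctl.type = 0xff;
	 *	ctl.flags = RND_FLAG_NO_COLLECT;
	 *	ctl.mask = RND_FLAG_NO_COLLECT;
	 *	(void)ioctl(fd, RNDCTL, &ctl);
	 */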
	case RNDADDDATA: {	/* Enter seed into entropy pool. */
		rnddata_t *rdata = data;
		unsigned entropybits = 0;

		if (!atomic_load_relaxed(&entropy_collection))
			break;	/* thanks but no thanks */
		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
			return EINVAL;

		/*
		 * This ioctl serves as the userland alternative to a
		 * bootloader-provided seed -- typically furnished by
		 * /etc/rc.d/random_seed.  We accept the user's entropy
		 * claim only if
		 *
		 *	(a) the user is privileged, and
		 *	(b) we have not entered a bootloader seed,
		 *
		 * under the assumption that the user may use this to
		 * load a seed from disk that we have already loaded
		 * from the bootloader, so we don't double-count it.
		 */
		if (privileged && rdata->entropy && rdata->len) {
			mutex_enter(&E->lock);
			if (!E->seeded) {
				entropybits = MIN(rdata->entropy,
				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
				E->seeded = true;
			}
			mutex_exit(&E->lock);
		}

		/* Enter the data and consolidate entropy. */
		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
		    entropybits);
		error = entropy_consolidate();
		break;
	}
	default:
		error = ENOTTY;
	}

	/* Return any error that may have come up. */
	return error;
}

/* Legacy entry points */

void
rnd_seed(void *seed, size_t len)
{

	if (len != sizeof(rndsave_t)) {
		printf("entropy: invalid seed length: %zu,"
		    " expected sizeof(rndsave_t) = %zu\n",
		    len, sizeof(rndsave_t));
		return;
	}
	entropy_seed(seed);
}

void
rnd_init(void)
{

	entropy_init();
}

void
rnd_init_softint(void)
{

	entropy_init_late();
	entropy_bootrequest();
}

int
rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
{

	return entropy_ioctl(cmd, data);
}
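/*
 * Finally, for reference, a userland sketch of the RNDADDDATA path in
 * entropy_ioctl above -- roughly what a seed-loading script does with
 * the contents of a saved seed file.  seedfd and fd are assumed open
 * on the seed file and /dev/urandom respectively; error handling is
 * elided:
 *
 *	rnddata_t rdata;
 *	ssize_t n;
 *
 *	memset(&rdata, 0, sizeof rdata);
 *	n = read(seedfd, rdata.data, sizeof rdata.data);
 *	if (n > 0) {
 *		rdata.len = (uint32_t)n;
 *		rdata.entropy = 0;	// claim no bits; data still stirs pool
 *		(void)ioctl(fd, RNDADDDATA, &rdata);
 *	}
 */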