      1 /*	$NetBSD: arc4random.c,v 1.41 2025/03/02 22:53:45 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2014 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Legacy arc4random(3) API from OpenBSD reimplemented using the
     34  * ChaCha20 PRF, with per-thread state.
     35  *
     36  * Security model:
     37  * - An attacker who sees some outputs cannot predict past or future
     38  *   outputs.
     39  * - An attacker who sees the PRNG state cannot predict past outputs.
     40  * - An attacker who sees a child's PRNG state cannot predict past or
     41  *   future outputs in the parent, or in other children.
     42  *
     43  * The arc4random(3) API may abort the process if:
     44  *
     45  * (a) the crypto self-test fails, or
     46  * (b) sysctl(KERN_ARND) fails when reseeding the PRNG.
     47  *
     48  * The crypto self-test occurs only once, on the first use of any of
     49  * the arc4random(3) API.  KERN_ARND is unlikely to fail later unless
     50  * the kernel is seriously broken.
     51  */
     52 
     53 #include <sys/cdefs.h>
     54 __RCSID("$NetBSD: arc4random.c,v 1.41 2025/03/02 22:53:45 riastradh Exp $");
     55 
     56 #include "namespace.h"
     57 #include "reentrant.h"
     58 
     59 #include <sys/bitops.h>
     60 #include <sys/endian.h>
     61 #include <sys/errno.h>
     62 #include <sys/mman.h>
     63 #include <sys/sysctl.h>
     64 
     65 #include <assert.h>
     66 #include <sha2.h>
     67 #include <stdatomic.h>
     68 #include <stdbool.h>
     69 #include <stdint.h>
     70 #include <stdlib.h>
     71 #include <string.h>
     72 #include <unistd.h>
     73 
     74 #include "arc4random.h"
     75 #include "atfork.h"
     76 #include "reentrant.h"
     77 
     78 #ifdef __weak_alias
     79 __weak_alias(arc4random,_arc4random)
     80 __weak_alias(arc4random_addrandom,_arc4random_addrandom)
     81 __weak_alias(arc4random_buf,_arc4random_buf)
     82 __weak_alias(arc4random_stir,_arc4random_stir)
     83 __weak_alias(arc4random_uniform,_arc4random_uniform)
     84 #endif
     85 
     86 /*
     87  * For standard ChaCha, use le32dec/le32enc.  We don't need that for
     88  * the purposes of a nondeterministic random number generator -- we
     89  * don't need to be bit-for-bit compatible over any wire.
     90  */
     91 
     92 static inline uint32_t
     93 crypto_le32dec(const void *p)
     94 {
     95 	uint32_t v;
     96 
     97 	(void)memcpy(&v, p, sizeof v);
     98 
     99 	return v;
    100 }
    101 
    102 static inline void
    103 crypto_le32enc(void *p, uint32_t v)
    104 {
    105 
    106 	(void)memcpy(p, &v, sizeof v);
    107 }
    108 
    109 /* ChaCha core */
    110 
    111 #define	crypto_core_OUTPUTBYTES	64
    112 #define	crypto_core_INPUTBYTES	16
    113 #define	crypto_core_KEYBYTES	32
    114 #define	crypto_core_CONSTBYTES	16
    115 
    116 #define	crypto_core_ROUNDS	20
    117 
    118 static uint32_t
    119 rotate(uint32_t u, unsigned c)
    120 {
    121 
    122 	return (u << c) | (u >> (32 - c));
    123 }
    124 
    125 #define	QUARTERROUND(a, b, c, d) do {					      \
    126 	(a) += (b); (d) ^= (a); (d) = rotate((d), 16);			      \
    127 	(c) += (d); (b) ^= (c); (b) = rotate((b), 12);			      \
    128 	(a) += (b); (d) ^= (a); (d) = rotate((d),  8);			      \
    129 	(c) += (d); (b) ^= (c); (b) = rotate((b),  7);			      \
    130 } while (0)
    131 
    132 static const uint8_t crypto_core_constant32[16] = "expand 32-byte k";
    133 
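/*
 * The 64-byte working state is the usual ChaCha 4x4 matrix of 32-bit
 * words, loaded below from the crypto_core() arguments:
 *
 *	c0  c1  c2  c3		(16-byte constant `expand 32-byte k')
 *	k0  k1  k2  k3		(first half of the 32-byte key)
 *	k4  k5  k6  k7		(second half of the 32-byte key)
 *	i0  i1  i2  i3		(16-byte per-call input `in')
 *
 * Each loop iteration below is a double round: QUARTERROUND down the
 * four columns, then QUARTERROUND along the four diagonals.
 */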
    134 static void
    135 crypto_core(uint8_t *out, const uint8_t *in, const uint8_t *k,
    136     const uint8_t *c)
    137 {
    138 	uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15;
    139 	uint32_t j0,j1,j2,j3,j4,j5,j6,j7,j8,j9,j10,j11,j12,j13,j14,j15;
    140 	int i;
    141 
    142 	j0 = x0 = crypto_le32dec(c + 0);
    143 	j1 = x1 = crypto_le32dec(c + 4);
    144 	j2 = x2 = crypto_le32dec(c + 8);
    145 	j3 = x3 = crypto_le32dec(c + 12);
    146 	j4 = x4 = crypto_le32dec(k + 0);
    147 	j5 = x5 = crypto_le32dec(k + 4);
    148 	j6 = x6 = crypto_le32dec(k + 8);
    149 	j7 = x7 = crypto_le32dec(k + 12);
    150 	j8 = x8 = crypto_le32dec(k + 16);
    151 	j9 = x9 = crypto_le32dec(k + 20);
    152 	j10 = x10 = crypto_le32dec(k + 24);
    153 	j11 = x11 = crypto_le32dec(k + 28);
    154 	j12 = x12 = crypto_le32dec(in + 0);
    155 	j13 = x13 = crypto_le32dec(in + 4);
    156 	j14 = x14 = crypto_le32dec(in + 8);
    157 	j15 = x15 = crypto_le32dec(in + 12);
    158 
    159 	for (i = crypto_core_ROUNDS; i > 0; i -= 2) {
    160 		QUARTERROUND( x0, x4, x8,x12);
    161 		QUARTERROUND( x1, x5, x9,x13);
    162 		QUARTERROUND( x2, x6,x10,x14);
    163 		QUARTERROUND( x3, x7,x11,x15);
    164 		QUARTERROUND( x0, x5,x10,x15);
    165 		QUARTERROUND( x1, x6,x11,x12);
    166 		QUARTERROUND( x2, x7, x8,x13);
    167 		QUARTERROUND( x3, x4, x9,x14);
    168 	}
    169 
    170 	crypto_le32enc(out + 0, x0 + j0);
    171 	crypto_le32enc(out + 4, x1 + j1);
    172 	crypto_le32enc(out + 8, x2 + j2);
    173 	crypto_le32enc(out + 12, x3 + j3);
    174 	crypto_le32enc(out + 16, x4 + j4);
    175 	crypto_le32enc(out + 20, x5 + j5);
    176 	crypto_le32enc(out + 24, x6 + j6);
    177 	crypto_le32enc(out + 28, x7 + j7);
    178 	crypto_le32enc(out + 32, x8 + j8);
    179 	crypto_le32enc(out + 36, x9 + j9);
    180 	crypto_le32enc(out + 40, x10 + j10);
    181 	crypto_le32enc(out + 44, x11 + j11);
    182 	crypto_le32enc(out + 48, x12 + j12);
    183 	crypto_le32enc(out + 52, x13 + j13);
    184 	crypto_le32enc(out + 56, x14 + j14);
    185 	crypto_le32enc(out + 60, x15 + j15);
    186 }
    187 
    188 /* ChaCha self-test */
    189 
    190 #ifdef _DIAGNOSTIC
    191 
    192 /*
    193  * Test vector for ChaCha20 from
    194  * <http://tools.ietf.org/html/draft-strombergson-chacha-test-vectors-00>,
    195  * test vectors for ChaCha12 and ChaCha8 and for big-endian machines
    196  * generated by the same crypto_core code with crypto_core_ROUNDS and
    197  * crypto_le32enc/dec varied.
    198  */
    199 
    200 static const uint8_t crypto_core_selftest_vector[64] = {
    201 #if _BYTE_ORDER == _LITTLE_ENDIAN
    202 #  if crypto_core_ROUNDS == 8
    203 	0x3e,0x00,0xef,0x2f,0x89,0x5f,0x40,0xd6,
    204 	0x7f,0x5b,0xb8,0xe8,0x1f,0x09,0xa5,0xa1,
    205 	0x2c,0x84,0x0e,0xc3,0xce,0x9a,0x7f,0x3b,
    206 	0x18,0x1b,0xe1,0x88,0xef,0x71,0x1a,0x1e,
    207 	0x98,0x4c,0xe1,0x72,0xb9,0x21,0x6f,0x41,
    208 	0x9f,0x44,0x53,0x67,0x45,0x6d,0x56,0x19,
    209 	0x31,0x4a,0x42,0xa3,0xda,0x86,0xb0,0x01,
    210 	0x38,0x7b,0xfd,0xb8,0x0e,0x0c,0xfe,0x42,
    211 #  elif crypto_core_ROUNDS == 12
    212 	0x9b,0xf4,0x9a,0x6a,0x07,0x55,0xf9,0x53,
    213 	0x81,0x1f,0xce,0x12,0x5f,0x26,0x83,0xd5,
    214 	0x04,0x29,0xc3,0xbb,0x49,0xe0,0x74,0x14,
    215 	0x7e,0x00,0x89,0xa5,0x2e,0xae,0x15,0x5f,
    216 	0x05,0x64,0xf8,0x79,0xd2,0x7a,0xe3,0xc0,
    217 	0x2c,0xe8,0x28,0x34,0xac,0xfa,0x8c,0x79,
    218 	0x3a,0x62,0x9f,0x2c,0xa0,0xde,0x69,0x19,
    219 	0x61,0x0b,0xe8,0x2f,0x41,0x13,0x26,0xbe,
    220 #  elif crypto_core_ROUNDS == 20
    221 	0x76,0xb8,0xe0,0xad,0xa0,0xf1,0x3d,0x90,
    222 	0x40,0x5d,0x6a,0xe5,0x53,0x86,0xbd,0x28,
    223 	0xbd,0xd2,0x19,0xb8,0xa0,0x8d,0xed,0x1a,
    224 	0xa8,0x36,0xef,0xcc,0x8b,0x77,0x0d,0xc7,
    225 	0xda,0x41,0x59,0x7c,0x51,0x57,0x48,0x8d,
    226 	0x77,0x24,0xe0,0x3f,0xb8,0xd8,0x4a,0x37,
    227 	0x6a,0x43,0xb8,0xf4,0x15,0x18,0xa1,0x1c,
    228 	0xc3,0x87,0xb6,0x69,0xb2,0xee,0x65,0x86,
    229 #  else
    230 #    error crypto_core_ROUNDS must be 8, 12, or 20.
    231 #  endif
    232 #elif _BYTE_ORDER == _BIG_ENDIAN
    233 #  if crypto_core_ROUNDS == 8
    234 	0x9a,0x13,0x07,0xe3,0x38,0x18,0x9e,0x99,
    235 	0x15,0x37,0x16,0x4d,0x04,0xe6,0x48,0x9a,
    236 	0x07,0xd6,0xe8,0x7a,0x02,0xf9,0xf5,0xc7,
    237 	0x3f,0xa9,0xc2,0x0a,0xe1,0xc6,0x62,0xea,
    238 	0x80,0xaf,0xb6,0x51,0xca,0x52,0x43,0x87,
    239 	0xe3,0xa6,0xa6,0x61,0x11,0xf5,0xe6,0xcf,
    240 	0x09,0x0f,0xdc,0x9d,0xc3,0xc3,0xbb,0x43,
    241 	0xd7,0xfa,0x70,0x42,0xbf,0xa5,0xee,0xa2,
    242 #  elif crypto_core_ROUNDS == 12
    243 	0xcf,0x6c,0x16,0x48,0xbf,0xf4,0xba,0x85,
    244 	0x32,0x69,0xd3,0x98,0xc8,0x7d,0xcd,0x3f,
    245 	0xdc,0x76,0x6b,0xa2,0x7b,0xcb,0x17,0x4d,
    246 	0x05,0xda,0xdd,0xd8,0x62,0x54,0xbf,0xe0,
    247 	0x65,0xed,0x0e,0xf4,0x01,0x7e,0x3c,0x05,
    248 	0x35,0xb2,0x7a,0x60,0xf3,0x8f,0x12,0x33,
    249 	0x24,0x60,0xcd,0x85,0xfe,0x4c,0xf3,0x39,
    250 	0xb1,0x0e,0x3e,0xe0,0xba,0xa6,0x2f,0xa9,
    251 #  elif crypto_core_ROUNDS == 20
    252 	0x83,0x8b,0xf8,0x75,0xf7,0xde,0x9d,0x8c,
    253 	0x33,0x14,0x72,0x28,0xd1,0xbe,0x88,0xe5,
    254 	0x94,0xb5,0xed,0xb8,0x56,0xb5,0x9e,0x0c,
    255 	0x64,0x6a,0xaf,0xd9,0xa7,0x49,0x10,0x59,
    256 	0xba,0x3a,0x82,0xf8,0x4a,0x70,0x9c,0x00,
    257 	0x82,0x2c,0xae,0xc6,0xd7,0x1c,0x2e,0xda,
    258 	0x2a,0xfb,0x61,0x70,0x2b,0xd1,0xbf,0x8b,
    259 	0x95,0xbc,0x23,0xb6,0x4b,0x60,0x02,0xec,
    260 #  else
    261 #    error crypto_core_ROUNDS must be 8, 12, or 20.
    262 #  endif
    263 #else
    264 #  error Byte order must be little-endian or big-endian.
    265 #endif
    266 };
    267 
    268 static int
    269 crypto_core_selftest(void)
    270 {
    271 	const uint8_t nonce[crypto_core_INPUTBYTES] = {0};
    272 	const uint8_t key[crypto_core_KEYBYTES] = {0};
    273 	uint8_t block[64];
    274 	unsigned i;
    275 
    276 	crypto_core(block, nonce, key, crypto_core_constant32);
    277 	for (i = 0; i < 64; i++) {
    278 		if (block[i] != crypto_core_selftest_vector[i])
    279 			return EIO;
    280 	}
    281 
    282 	return 0;
    283 }
    284 
    285 #else  /* !_DIAGNOSTIC */
    286 
    287 static int
    288 crypto_core_selftest(void)
    289 {
    290 
    291 	return 0;
    292 }
    293 
    294 #endif
    295 
    296 /* PRNG */
    297 
    298 /*
    299  * For a state s, rather than use ChaCha20 as a stream cipher to
    300  * generate the concatenation ChaCha20_s(0) || ChaCha20_s(1) || ..., we
    301  * split ChaCha20_s(0) into s' || x and yield x for the first request,
    302  * split ChaCha20_s'(0) into s'' || y and yield y for the second
    303  * request, &c.  This provides backtracking resistance: an attacker who
    304  * finds s'' can't recover s' or x.
    305  */
    306 
    307 #define	crypto_prng_SEEDBYTES		crypto_core_KEYBYTES
    308 #define	crypto_prng_MAXOUTPUTBYTES	\
    309 	(crypto_core_OUTPUTBYTES - crypto_prng_SEEDBYTES)
    310 
    311 __CTASSERT(sizeof(struct crypto_prng) == crypto_prng_SEEDBYTES);
    312 
    313 static void
    314 crypto_prng_seed(struct crypto_prng *prng, const void *seed)
    315 {
    316 
    317 	(void)memcpy(prng->state, seed, crypto_prng_SEEDBYTES);
    318 }
    319 
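/*
 * Fast key erasure: each call generates one 64-byte ChaCha block from
 * the current state, installs the first crypto_prng_SEEDBYTES of it
 * as the new state, copies up to crypto_prng_MAXOUTPUTBYTES of the
 * rest to the caller, and wipes the block, so the state that produced
 * past outputs is never retained.
 */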
    320 static void
    321 crypto_prng_buf(struct crypto_prng *prng, void *buf, size_t n)
    322 {
    323 	const uint8_t nonce[crypto_core_INPUTBYTES] = {0};
    324 	uint8_t output[crypto_core_OUTPUTBYTES];
    325 
    326 	_DIAGASSERT(n <= crypto_prng_MAXOUTPUTBYTES);
    327 	__CTASSERT(sizeof prng->state + crypto_prng_MAXOUTPUTBYTES
    328 	    <= sizeof output);
    329 
    330 	crypto_core(output, nonce, prng->state, crypto_core_constant32);
    331 	(void)memcpy(prng->state, output, sizeof prng->state);
    332 	(void)memcpy(buf, output + sizeof prng->state, n);
    333 	(void)explicit_memset(output, 0, sizeof output);
    334 }
    335 
    336 /* One-time stream: expand short single-use secret into long secret */
    337 
    338 #define	crypto_onetimestream_SEEDBYTES	crypto_core_KEYBYTES
    339 
    340 static void
    341 crypto_onetimestream(const void *seed, void *buf, size_t n)
    342 {
    343 	uint32_t nonce[crypto_core_INPUTBYTES / sizeof(uint32_t)] = {0};
    344 	uint8_t block[crypto_core_OUTPUTBYTES];
    345 	uint8_t *p8, *p32;
    346 	const uint8_t *nonce8 = (const uint8_t *)(void *)nonce;
    347 	size_t ni, nb, nf;
    348 
     349 	/*
     350 	 * Guarantee we can generate up to n bytes.  We have
     351 	 * 2^(8*INPUTBYTES) possible inputs, each yielding
     352 	 * OUTPUTBYTES bytes of output, for a total stream of
     353 	 * OUTPUTBYTES*2^(8*INPUTBYTES) bytes.  Since CHAR_BIT*sizeof n
     354 	 * is an upper bound on log_2 n, it suffices to require that it
     355 	 * not exceed log_2 of the total output stream length.  We have
     356 	 *
     357 	 *	log_2 (o 2^(8 i)) = log_2 o + log_2 2^(8 i)
     358 	 *	  = log_2 o + 8 i.
     359 	 */
    360 #ifndef __lint__
    361 	__CTASSERT(CHAR_BIT * sizeof n <= (ilog2(crypto_core_OUTPUTBYTES) +
    362 		8 * crypto_core_INPUTBYTES));
    363 #endif
    364 
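	/*
	 * Split the request into a head of ni < 4 bytes to reach
	 * 4-byte alignment, nb whole 64-byte blocks generated directly
	 * into the caller's buffer, and a tail of nf bytes, bumping
	 * the 128-bit block nonce as we go.
	 */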
    365 	p8 = buf;
    366 	p32 = (uint8_t *)roundup2((uintptr_t)p8, 4);
    367 	ni = p32 - p8;
    368 	if (n < ni)
    369 		ni = n;
    370 	nb = (n - ni) / sizeof block;
    371 	nf = (n - ni) % sizeof block;
    372 
    373 	_DIAGASSERT(((uintptr_t)p32 & 3) == 0);
    374 	_DIAGASSERT(ni <= n);
    375 	_DIAGASSERT(nb <= (n / sizeof block));
    376 	_DIAGASSERT(nf <= n);
    377 	_DIAGASSERT(n == (ni + (nb * sizeof block) + nf));
    378 	_DIAGASSERT(ni < 4);
    379 	_DIAGASSERT(nf < sizeof block);
    380 
    381 	if (ni) {
    382 		crypto_core(block, nonce8, seed, crypto_core_constant32);
    383 		nonce[0]++;
    384 		(void)memcpy(p8, block, ni);
    385 	}
    386 	while (nb--) {
    387 		crypto_core(p32, nonce8, seed, crypto_core_constant32);
    388 		if (++nonce[0] == 0)
    389 			nonce[1]++;
    390 		p32 += crypto_core_OUTPUTBYTES;
    391 	}
    392 	if (nf) {
    393 		crypto_core(block, nonce8, seed, crypto_core_constant32);
    394 		if (++nonce[0] == 0)
    395 			nonce[1]++;
    396 		(void)memcpy(p32, block, nf);
    397 	}
    398 
    399 	if (ni | nf)
    400 		(void)explicit_memset(block, 0, sizeof block);
    401 }
    402 
    403 /*
    404  * entropy_epoch()
    405  *
    406  *	Return the current entropy epoch, from the sysctl node
    407  *	kern.entropy.epoch.
    408  *
    409  *	The entropy epoch is never zero.  Initially, or on error, it is
    410  *	(unsigned)-1.  It may wrap around but it skips (unsigned)-1 and
    411  *	0 when it does.  Changes happen less than once per second, so
    412  *	wraparound will only affect systems after 136 years of uptime.
    413  *
    414  *	XXX This should get it from a page shared read-only by kernel
    415  *	with userland, but until we implement such a mechanism, this
    416  *	sysctl -- incurring the cost of a syscall -- will have to
    417  *	serve.
    418  */
    419 static unsigned
    420 entropy_epoch(void)
    421 {
    422 	static atomic_int mib0[3];
    423 	static atomic_bool initialized = false;
    424 	int mib[3];
    425 	unsigned epoch = (unsigned)-1;
    426 	size_t epochlen = sizeof(epoch);
    427 
    428 	/*
    429 	 * Resolve kern.entropy.epoch if we haven't already.  Cache it
    430 	 * for the next caller.  Initialization is idempotent, so it's
    431 	 * OK if two threads do it at once.
    432 	 */
    433 	if (atomic_load_explicit(&initialized, memory_order_acquire)) {
    434 		mib[0] = atomic_load_explicit(&mib0[0], memory_order_relaxed);
    435 		mib[1] = atomic_load_explicit(&mib0[1], memory_order_relaxed);
    436 		mib[2] = atomic_load_explicit(&mib0[2], memory_order_relaxed);
    437 	} else {
    438 		size_t nmib = __arraycount(mib);
    439 
    440 		if (sysctlnametomib("kern.entropy.epoch", mib, &nmib) == -1)
    441 			return (unsigned)-1;
    442 		if (nmib != __arraycount(mib))
    443 			return (unsigned)-1;
    444 		atomic_store_explicit(&mib0[0], mib[0], memory_order_relaxed);
    445 		atomic_store_explicit(&mib0[1], mib[1], memory_order_relaxed);
    446 		atomic_store_explicit(&mib0[2], mib[2], memory_order_relaxed);
    447 		atomic_store_explicit(&initialized, true,
    448 		    memory_order_release);
    449 	}
    450 
    451 	if (sysctl(mib, __arraycount(mib), &epoch, &epochlen, NULL, 0) == -1)
    452 		return (unsigned)-1;
    453 	if (epochlen != sizeof(epoch))
    454 		return (unsigned)-1;
    455 
    456 	return epoch;
    457 }
    458 
    459 /* arc4random state: per-thread, per-process (zeroed in child on fork) */
    460 
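/*
 * Reseed from the kernel, hashing in the current PRNG output and any
 * caller-supplied data along with the fresh KERN_ARND bytes, so
 * reseeding can add to whatever unpredictability is already in the
 * state but never discard it.
 */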
    461 static void
    462 arc4random_prng_addrandom(struct arc4random_prng *prng, const void *data,
    463     size_t datalen)
    464 {
    465 	const int mib[] = { CTL_KERN, KERN_ARND };
    466 	SHA256_CTX ctx;
    467 	uint8_t buf[crypto_prng_SEEDBYTES];
    468 	size_t buflen = sizeof buf;
    469 	unsigned epoch = entropy_epoch();
    470 
    471 	__CTASSERT(sizeof buf == SHA256_DIGEST_LENGTH);
    472 
    473 	SHA256_Init(&ctx);
    474 
    475 	crypto_prng_buf(&prng->arc4_prng, buf, sizeof buf);
    476 	SHA256_Update(&ctx, buf, sizeof buf);
    477 
    478 	if (sysctl(mib, (u_int)__arraycount(mib), buf, &buflen, NULL, 0) == -1)
    479 		abort();
    480 	if (buflen != sizeof buf)
    481 		abort();
    482 	SHA256_Update(&ctx, buf, sizeof buf);
    483 
    484 	if (data != NULL)
    485 		SHA256_Update(&ctx, data, datalen);
    486 
    487 	SHA256_Final(buf, &ctx);
    488 	(void)explicit_memset(&ctx, 0, sizeof ctx);
    489 
    490 	/* reseed(SHA256(prng() || sysctl(KERN_ARND) || data)) */
    491 	crypto_prng_seed(&prng->arc4_prng, buf);
    492 	(void)explicit_memset(buf, 0, sizeof buf);
    493 	prng->arc4_epoch = epoch;
    494 }
    495 
    496 #ifdef _REENTRANT
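/*
 * The per-thread PRNG state gets its own page(s) so it can be marked
 * with minherit(2) MAP_INHERIT_ZERO: after fork the child sees the
 * state as all zeros, and since the entropy epoch is never zero, the
 * child's first use reseeds instead of replaying the parent's output
 * stream.
 */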
    497 static struct arc4random_prng *
    498 arc4random_prng_create(void)
    499 {
    500 	struct arc4random_prng *prng;
    501 	const size_t size = roundup(sizeof(*prng), sysconf(_SC_PAGESIZE));
    502 
    503 	prng = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1,
    504 	    0);
    505 	if (prng == MAP_FAILED)
    506 		goto fail0;
    507 	if (minherit(prng, size, MAP_INHERIT_ZERO) == -1)
    508 		goto fail1;
    509 
    510 	return prng;
    511 
    512 fail1:	(void)munmap(prng, size);
    513 fail0:	return NULL;
    514 }
    515 #endif
    516 
    517 #ifdef _REENTRANT
    518 static void
    519 arc4random_prng_destroy(struct arc4random_prng *prng)
    520 {
    521 	const size_t size = roundup(sizeof(*prng), sysconf(_SC_PAGESIZE));
    522 
    523 	(void)explicit_memset(prng, 0, sizeof(*prng));
    524 	(void)munmap(prng, size);
    525 }
    526 #endif
    527 
    528 /* Library state */
    529 
    530 struct arc4random_global_state arc4random_global = {
    531 #ifdef _REENTRANT
    532 	.lock		= MUTEX_INITIALIZER,
    533 #endif
    534 	.initialized	= false,
    535 };
    536 
    537 static struct atfork_callback arc4random_atfork_prepare_cb;
    538 static void
    539 arc4random_atfork_prepare(void)
    540 {
    541 
    542 	mutex_lock(&arc4random_global.lock);
    543 	(void)explicit_memset(&arc4random_global.prng, 0,
    544 	    sizeof arc4random_global.prng);
    545 }
    546 
    547 static struct atfork_callback arc4random_atfork_parent_cb;
    548 static void
    549 arc4random_atfork_parent(void)
    550 {
    551 
    552 	mutex_unlock(&arc4random_global.lock);
    553 }
    554 
    555 static struct atfork_callback arc4random_atfork_child_cb;
    556 static void
    557 arc4random_atfork_child(void)
    558 {
    559 
    560 	mutex_unlock(&arc4random_global.lock);
    561 }
    562 
    563 #ifdef _REENTRANT
    564 static void
    565 arc4random_tsd_destructor(void *p)
    566 {
    567 	struct arc4random_prng *const prng = p;
    568 
    569 	arc4random_prng_destroy(prng);
    570 }
    571 #endif
    572 
    573 static void
    574 arc4random_initialize(void)
    575 {
    576 
    577 	mutex_lock(&arc4random_global.lock);
    578 	if (!arc4random_global.initialized) {
    579 		if (crypto_core_selftest() != 0)
    580 			abort();
    581 		__libc_atfork(
    582 		    &arc4random_atfork_prepare_cb, &arc4random_atfork_prepare,
    583 		    &arc4random_atfork_parent_cb, &arc4random_atfork_parent,
    584 		    &arc4random_atfork_child_cb, &arc4random_atfork_child);
    585 #ifdef _REENTRANT
    586 		if (thr_keycreate(&arc4random_global.thread_key,
    587 			&arc4random_tsd_destructor) == 0)
    588 			arc4random_global.per_thread = true;
    589 #endif
    590 		arc4random_global.initialized = true;
    591 	}
    592 	mutex_unlock(&arc4random_global.lock);
    593 }
    594 
    595 static struct arc4random_prng *
    596 arc4random_prng_get(void)
    597 {
    598 	struct arc4random_prng *prng = NULL;
    599 
    600 	/* Make sure the library is initialized.  */
    601 	if (__predict_false(!arc4random_global.initialized))
    602 		arc4random_initialize();
    603 
    604 #ifdef _REENTRANT
    605 	/* Get or create the per-thread PRNG state.  */
    606 	prng = __predict_true(arc4random_global.per_thread)
    607 	    ? thr_getspecific(arc4random_global.thread_key)
    608 	    : NULL;
    609 	if (__predict_false(prng == NULL) && arc4random_global.per_thread) {
    610 		prng = arc4random_prng_create();
    611 		thr_setspecific(arc4random_global.thread_key, prng);
    612 	}
    613 #endif
    614 
    615 	/* If we can't create it, fall back to the global PRNG.  */
    616 	if (__predict_false(prng == NULL)) {
    617 		mutex_lock(&arc4random_global.lock);
    618 		prng = &arc4random_global.prng;
    619 	}
    620 
    621 	/* Guarantee the PRNG is seeded.  */
    622 	if (__predict_false(prng->arc4_epoch != entropy_epoch()))
    623 		arc4random_prng_addrandom(prng, NULL, 0);
    624 
    625 	return prng;
    626 }
    627 
    628 static void
    629 arc4random_prng_put(struct arc4random_prng *prng)
    630 {
    631 
    632 	/* If we had fallen back to the global PRNG, unlock it.  */
    633 	if (__predict_false(prng == &arc4random_global.prng))
    634 		mutex_unlock(&arc4random_global.lock);
    635 }
    636 
    637 /* Public API */
    638 
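/*
 * Example use (an illustrative sketch, not part of this file; callers
 * normally get the declarations from <stdlib.h>):
 *
 *	uint8_t key[32];
 *	uint32_t die;
 *
 *	arc4random_buf(key, sizeof key);	(a 32-byte secret key)
 *	die = 1 + arc4random_uniform(6);	(uniform in [1, 6])
 */
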
    639 uint32_t
    640 arc4random(void)
    641 {
    642 	struct arc4random_prng *prng;
    643 	uint32_t v;
    644 
    645 	prng = arc4random_prng_get();
    646 	crypto_prng_buf(&prng->arc4_prng, &v, sizeof v);
    647 	arc4random_prng_put(prng);
    648 
    649 	return v;
    650 }
    651 
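/*
 * Requests of at most crypto_prng_MAXOUTPUTBYTES bytes come straight
 * out of the per-thread PRNG.  Larger requests draw a one-time seed
 * from the PRNG and expand it with crypto_onetimestream(), so the
 * PRNG -- and, in the fallback case, the global lock -- is held only
 * long enough to produce the seed.
 */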
    652 void
    653 arc4random_buf(void *buf, size_t len)
    654 {
    655 	struct arc4random_prng *prng;
    656 
    657 	if (len <= crypto_prng_MAXOUTPUTBYTES) {
    658 		prng = arc4random_prng_get();
    659 		crypto_prng_buf(&prng->arc4_prng, buf, len);
    660 		arc4random_prng_put(prng);
    661 	} else {
    662 		uint8_t seed[crypto_onetimestream_SEEDBYTES];
    663 
    664 		prng = arc4random_prng_get();
    665 		crypto_prng_buf(&prng->arc4_prng, seed, sizeof seed);
    666 		arc4random_prng_put(prng);
    667 
    668 		crypto_onetimestream(seed, buf, len);
    669 		(void)explicit_memset(seed, 0, sizeof seed);
    670 	}
    671 }
    672 
    673 uint32_t
    674 arc4random_uniform(uint32_t bound)
    675 {
    676 	struct arc4random_prng *prng;
    677 	uint32_t minimum, r;
    678 
    679 	/*
    680 	 * We want a uniform random choice in [0, n), and arc4random()
    681 	 * makes a uniform random choice in [0, 2^32).  If we reduce
    682 	 * that modulo n, values in [0, 2^32 mod n) will be represented
    683 	 * slightly more than values in [2^32 mod n, n).  Instead we
    684 	 * choose only from [2^32 mod n, 2^32) by rejecting samples in
    685 	 * [0, 2^32 mod n), to avoid counting the extra representative
    686 	 * of [0, 2^32 mod n).  To compute 2^32 mod n, note that
    687 	 *
    688 	 *	2^32 mod n = 2^32 mod n - 0
    689 	 *	  = 2^32 mod n - n mod n
    690 	 *	  = (2^32 - n) mod n,
    691 	 *
    692 	 * the last of which is what we compute in 32-bit arithmetic.
    693 	 */
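	/*
	 * For example, bound = 6 gives minimum = 2^32 mod 6 = 4, so
	 * samples in [0, 4) are rejected; the remaining
	 * 2^32 - 4 = 6 * 715827882 accepted values cover each residue
	 * modulo 6 exactly 715827882 times.
	 */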
    694 	minimum = (-bound % bound);
    695 
    696 	prng = arc4random_prng_get();
    697 	do crypto_prng_buf(&prng->arc4_prng, &r, sizeof r);
    698 	while (__predict_false(r < minimum));
    699 	arc4random_prng_put(prng);
    700 
    701 	return (r % bound);
    702 }
    703 
    704 void
    705 arc4random_stir(void)
    706 {
    707 	struct arc4random_prng *prng;
    708 
    709 	prng = arc4random_prng_get();
    710 	arc4random_prng_addrandom(prng, NULL, 0);
    711 	arc4random_prng_put(prng);
    712 }
    713 
    714 /*
    715  * Silly signature here is for hysterical raisins.  Should instead be
    716  * const void *data and size_t datalen.
    717  */
    718 void
    719 arc4random_addrandom(u_char *data, int datalen)
    720 {
    721 	struct arc4random_prng *prng;
    722 
    723 	_DIAGASSERT(0 <= datalen);
    724 
    725 	prng = arc4random_prng_get();
    726 	arc4random_prng_addrandom(prng, data, datalen);
    727 	arc4random_prng_put(prng);
    728 }
    729 
    730 #ifdef _ARC4RANDOM_TEST
    731 
    732 #include <sys/wait.h>
    733 
    734 #include <err.h>
    735 #include <stdio.h>
    736 
    737 int
    738 main(int argc __unused, char **argv __unused)
    739 {
    740 	unsigned char gubbish[] = "random gubbish";
    741 	const uint8_t zero64[64] = {0};
    742 	uint8_t buf[2048];
    743 	unsigned i, a, n;
    744 
    745 	/* Test arc4random: should not be deterministic.  */
    746 	if (printf("arc4random: %08"PRIx32"\n", arc4random()) < 0)
    747 		err(1, "printf");
    748 
    749 	/* Test stirring: should definitely not be deterministic.  */
    750 	arc4random_stir();
    751 
    752 	/* Test small buffer.  */
    753 	arc4random_buf(buf, 8);
    754 	if (printf("arc4randombuf small:") < 0)
    755 		err(1, "printf");
    756 	for (i = 0; i < 8; i++)
    757 		if (printf(" %02x", buf[i]) < 0)
    758 			err(1, "printf");
    759 	if (printf("\n") < 0)
    760 		err(1, "printf");
    761 
    762 	/* Test addrandom: should not make the rest deterministic.  */
    763 	arc4random_addrandom(gubbish, sizeof gubbish);
    764 
    765 	/* Test large buffer.  */
    766 	arc4random_buf(buf, sizeof buf);
    767 	if (printf("arc4randombuf_large:") < 0)
    768 		err(1, "printf");
    769 	for (i = 0; i < sizeof buf; i++)
    770 		if (printf(" %02x", buf[i]) < 0)
    771 			err(1, "printf");
    772 	if (printf("\n") < 0)
    773 		err(1, "printf");
    774 
    775 	/* Test misaligned small and large.  */
    776 	for (a = 0; a < 64; a++) {
    777 		for (n = a; n < sizeof buf; n++) {
    778 			(void)memset(buf, 0, sizeof buf);
    779 			arc4random_buf(buf, n - a);
    780 			if (memcmp(buf + n - a, zero64, a) != 0)
    781 				errx(1, "arc4random buffer overflow 0");
    782 
    783 			(void)memset(buf, 0, sizeof buf);
    784 			arc4random_buf(buf + a, n - a);
    785 			if (memcmp(buf, zero64, a) != 0)
    786 				errx(1, "arc4random buffer overflow 1");
    787 
    788 			if ((2*a) <= n) {
    789 				(void)memset(buf, 0, sizeof buf);
    790 				arc4random_buf(buf + a, n - a - a);
    791 				if (memcmp(buf + n - a, zero64, a) != 0)
    792 					errx(1,
    793 					    "arc4random buffer overflow 2");
    794 			}
    795 		}
    796 	}
    797 
    798 	/* Test fork-safety.  */
    799     {
    800 	pid_t pid, rpid;
    801 	int status;
    802 
    803 	pid = fork();
    804 	switch (pid) {
    805 	case -1:
    806 		err(1, "fork");
    807 	case 0: {
    808 		/*
    809 		 * Verify the epoch has been set to zero by fork.
    810 		 */
    811 		struct arc4random_prng *prng = NULL;
    812 #ifdef _REENTRANT
    813 		prng = thr_getspecific(arc4random_global.thread_key);
    814 #endif
    815 		if (prng == NULL)
    816 			prng = &arc4random_global.prng;
    817 		_exit(prng->arc4_epoch != 0);
    818 	}
    819 	default:
    820 		rpid = waitpid(pid, &status, 0);
    821 		if (rpid == -1)
    822 			err(1, "waitpid");
    823 		if (rpid != pid)
    824 			errx(1, "waitpid returned wrong pid"
    825 			    ": %"PRIdMAX" != %"PRIdMAX,
    826 			    (intmax_t)rpid,
    827 			    (intmax_t)pid);
    828 		if (WIFEXITED(status)) {
    829 			if (WEXITSTATUS(status) != 0)
    830 				errx(1, "child exited with %d",
    831 				    WEXITSTATUS(status));
    832 		} else if (WIFSIGNALED(status)) {
    833 			errx(1, "child terminated on signal %d",
    834 			    WTERMSIG(status));
    835 		} else {
    836 			errx(1, "child died mysteriously: %d", status);
    837 		}
    838 	}
    839     }
    840 
    841 	/* XXX Test multithreaded fork safety...?  */
    842 
    843 	return 0;
    844 }
    845 #endif
    846