/*	$NetBSD: arc4random.c,v 1.44 2025/03/06 00:53:26 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Legacy arc4random(3) API from OpenBSD reimplemented using the
 * ChaCha20 PRF, with per-thread state.
 *
 * Security model:
 * - An attacker who sees some outputs cannot predict past or future
 *   outputs.
 * - An attacker who sees the PRNG state cannot predict past outputs.
 * - An attacker who sees a child's PRNG state cannot predict past or
 *   future outputs in the parent, or in other children.
 *
 * The arc4random(3) API may abort the process if:
 *
 * (a) the crypto self-test fails,
 * (b) pthread_atfork fails, or
 * (c) sysctl(KERN_ARND) fails when reseeding the PRNG.
 *
 * The crypto self-test and pthread_atfork occur only once, on the
 * first use of any of the arc4random(3) API.  KERN_ARND is unlikely to
 * fail later unless the kernel is seriously broken.
 */
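
/*
 * Illustrative usage of the API implemented below (not part of this
 * file; on NetBSD callers just include <stdlib.h>):
 *
 *	uint32_t x = arc4random();		 32 uniform random bits
 *	uint32_t d = arc4random_uniform(6);	 uniform in [0, 6)
 *	uint8_t key[32];
 *	arc4random_buf(key, sizeof key);	 fill buffer with random bytes
 */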

#include <sys/cdefs.h>
__RCSID("$NetBSD: arc4random.c,v 1.44 2025/03/06 00:53:26 riastradh Exp $");

#include "namespace.h"
#include "reentrant.h"

#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <assert.h>
#include <sha2.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "arc4random.h"
#include "reentrant.h"

#ifdef __weak_alias
__weak_alias(arc4random,_arc4random)
__weak_alias(arc4random_addrandom,_arc4random_addrandom)
__weak_alias(arc4random_buf,_arc4random_buf)
__weak_alias(arc4random_stir,_arc4random_stir)
__weak_alias(arc4random_uniform,_arc4random_uniform)
#endif

/*
 * For standard ChaCha, use le32dec/le32enc.  We don't need that for
 * the purposes of a nondeterministic random number generator -- we
 * don't need to be bit-for-bit compatible over any wire.
 */

static inline uint32_t
crypto_le32dec(const void *p)
{
	uint32_t v;

	(void)memcpy(&v, p, sizeof v);

	return v;
}

static inline void
crypto_le32enc(void *p, uint32_t v)
{

	(void)memcpy(p, &v, sizeof v);
}

/* ChaCha core */

#define	crypto_core_OUTPUTBYTES	64
#define	crypto_core_INPUTBYTES	16
#define	crypto_core_KEYBYTES	32
#define	crypto_core_CONSTBYTES	16

#define	crypto_core_ROUNDS	20

static uint32_t
rotate(uint32_t u, unsigned c)
{

	return (u << c) | (u >> (32 - c));
}

#define	QUARTERROUND(a, b, c, d) do {					      \
	(a) += (b); (d) ^= (a); (d) = rotate((d), 16);			      \
	(c) += (d); (b) ^= (c); (b) = rotate((b), 12);			      \
	(a) += (b); (d) ^= (a); (d) = rotate((d),  8);			      \
	(c) += (d); (b) ^= (c); (b) = rotate((b),  7);			      \
} while (0)
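
/*
 * Each iteration of the loop in crypto_core() below is a ChaCha
 * "double round": QUARTERROUND applied to the four columns of the 4x4
 * word state, then to the four diagonals.  crypto_core_ROUNDS counts
 * single rounds, hence the loop steps by two.
 */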

static const uint8_t crypto_core_constant32[16] = "expand 32-byte k";

static void
crypto_core(uint8_t *out, const uint8_t *in, const uint8_t *k,
    const uint8_t *c)
{
	uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15;
	uint32_t j0,j1,j2,j3,j4,j5,j6,j7,j8,j9,j10,j11,j12,j13,j14,j15;
	int i;

	j0 = x0 = crypto_le32dec(c + 0);
	j1 = x1 = crypto_le32dec(c + 4);
	j2 = x2 = crypto_le32dec(c + 8);
	j3 = x3 = crypto_le32dec(c + 12);
	j4 = x4 = crypto_le32dec(k + 0);
	j5 = x5 = crypto_le32dec(k + 4);
	j6 = x6 = crypto_le32dec(k + 8);
	j7 = x7 = crypto_le32dec(k + 12);
	j8 = x8 = crypto_le32dec(k + 16);
	j9 = x9 = crypto_le32dec(k + 20);
	j10 = x10 = crypto_le32dec(k + 24);
	j11 = x11 = crypto_le32dec(k + 28);
	j12 = x12 = crypto_le32dec(in + 0);
	j13 = x13 = crypto_le32dec(in + 4);
	j14 = x14 = crypto_le32dec(in + 8);
	j15 = x15 = crypto_le32dec(in + 12);

	for (i = crypto_core_ROUNDS; i > 0; i -= 2) {
		QUARTERROUND( x0, x4, x8,x12);
		QUARTERROUND( x1, x5, x9,x13);
		QUARTERROUND( x2, x6,x10,x14);
		QUARTERROUND( x3, x7,x11,x15);
		QUARTERROUND( x0, x5,x10,x15);
		QUARTERROUND( x1, x6,x11,x12);
		QUARTERROUND( x2, x7, x8,x13);
		QUARTERROUND( x3, x4, x9,x14);
	}

	crypto_le32enc(out + 0, x0 + j0);
	crypto_le32enc(out + 4, x1 + j1);
	crypto_le32enc(out + 8, x2 + j2);
	crypto_le32enc(out + 12, x3 + j3);
	crypto_le32enc(out + 16, x4 + j4);
	crypto_le32enc(out + 20, x5 + j5);
	crypto_le32enc(out + 24, x6 + j6);
	crypto_le32enc(out + 28, x7 + j7);
	crypto_le32enc(out + 32, x8 + j8);
	crypto_le32enc(out + 36, x9 + j9);
	crypto_le32enc(out + 40, x10 + j10);
	crypto_le32enc(out + 44, x11 + j11);
	crypto_le32enc(out + 48, x12 + j12);
	crypto_le32enc(out + 52, x13 + j13);
	crypto_le32enc(out + 56, x14 + j14);
	crypto_le32enc(out + 60, x15 + j15);
}

/* ChaCha self-test */

/*
 * Test vector for ChaCha20 from
 * <http://tools.ietf.org/html/draft-strombergson-chacha-test-vectors-00>,
 * test vectors for ChaCha12 and ChaCha8 and for big-endian machines
 * generated by the same crypto_core code with crypto_core_ROUNDS and
 * crypto_le32enc/dec varied.
 */

static const uint8_t crypto_core_selftest_vector[64] = {
#if _BYTE_ORDER == _LITTLE_ENDIAN
#  if crypto_core_ROUNDS == 8
	0x3e,0x00,0xef,0x2f,0x89,0x5f,0x40,0xd6,
	0x7f,0x5b,0xb8,0xe8,0x1f,0x09,0xa5,0xa1,
	0x2c,0x84,0x0e,0xc3,0xce,0x9a,0x7f,0x3b,
	0x18,0x1b,0xe1,0x88,0xef,0x71,0x1a,0x1e,
	0x98,0x4c,0xe1,0x72,0xb9,0x21,0x6f,0x41,
	0x9f,0x44,0x53,0x67,0x45,0x6d,0x56,0x19,
	0x31,0x4a,0x42,0xa3,0xda,0x86,0xb0,0x01,
	0x38,0x7b,0xfd,0xb8,0x0e,0x0c,0xfe,0x42,
#  elif crypto_core_ROUNDS == 12
	0x9b,0xf4,0x9a,0x6a,0x07,0x55,0xf9,0x53,
	0x81,0x1f,0xce,0x12,0x5f,0x26,0x83,0xd5,
	0x04,0x29,0xc3,0xbb,0x49,0xe0,0x74,0x14,
	0x7e,0x00,0x89,0xa5,0x2e,0xae,0x15,0x5f,
	0x05,0x64,0xf8,0x79,0xd2,0x7a,0xe3,0xc0,
	0x2c,0xe8,0x28,0x34,0xac,0xfa,0x8c,0x79,
	0x3a,0x62,0x9f,0x2c,0xa0,0xde,0x69,0x19,
	0x61,0x0b,0xe8,0x2f,0x41,0x13,0x26,0xbe,
#  elif crypto_core_ROUNDS == 20
	0x76,0xb8,0xe0,0xad,0xa0,0xf1,0x3d,0x90,
	0x40,0x5d,0x6a,0xe5,0x53,0x86,0xbd,0x28,
	0xbd,0xd2,0x19,0xb8,0xa0,0x8d,0xed,0x1a,
	0xa8,0x36,0xef,0xcc,0x8b,0x77,0x0d,0xc7,
	0xda,0x41,0x59,0x7c,0x51,0x57,0x48,0x8d,
	0x77,0x24,0xe0,0x3f,0xb8,0xd8,0x4a,0x37,
	0x6a,0x43,0xb8,0xf4,0x15,0x18,0xa1,0x1c,
	0xc3,0x87,0xb6,0x69,0xb2,0xee,0x65,0x86,
#  else
#    error crypto_core_ROUNDS must be 8, 12, or 20.
#  endif
#elif _BYTE_ORDER == _BIG_ENDIAN
#  if crypto_core_ROUNDS == 8
	0x9a,0x13,0x07,0xe3,0x38,0x18,0x9e,0x99,
	0x15,0x37,0x16,0x4d,0x04,0xe6,0x48,0x9a,
	0x07,0xd6,0xe8,0x7a,0x02,0xf9,0xf5,0xc7,
	0x3f,0xa9,0xc2,0x0a,0xe1,0xc6,0x62,0xea,
	0x80,0xaf,0xb6,0x51,0xca,0x52,0x43,0x87,
	0xe3,0xa6,0xa6,0x61,0x11,0xf5,0xe6,0xcf,
	0x09,0x0f,0xdc,0x9d,0xc3,0xc3,0xbb,0x43,
	0xd7,0xfa,0x70,0x42,0xbf,0xa5,0xee,0xa2,
#  elif crypto_core_ROUNDS == 12
	0xcf,0x6c,0x16,0x48,0xbf,0xf4,0xba,0x85,
	0x32,0x69,0xd3,0x98,0xc8,0x7d,0xcd,0x3f,
	0xdc,0x76,0x6b,0xa2,0x7b,0xcb,0x17,0x4d,
	0x05,0xda,0xdd,0xd8,0x62,0x54,0xbf,0xe0,
	0x65,0xed,0x0e,0xf4,0x01,0x7e,0x3c,0x05,
	0x35,0xb2,0x7a,0x60,0xf3,0x8f,0x12,0x33,
	0x24,0x60,0xcd,0x85,0xfe,0x4c,0xf3,0x39,
	0xb1,0x0e,0x3e,0xe0,0xba,0xa6,0x2f,0xa9,
#  elif crypto_core_ROUNDS == 20
	0x83,0x8b,0xf8,0x75,0xf7,0xde,0x9d,0x8c,
	0x33,0x14,0x72,0x28,0xd1,0xbe,0x88,0xe5,
	0x94,0xb5,0xed,0xb8,0x56,0xb5,0x9e,0x0c,
	0x64,0x6a,0xaf,0xd9,0xa7,0x49,0x10,0x59,
	0xba,0x3a,0x82,0xf8,0x4a,0x70,0x9c,0x00,
	0x82,0x2c,0xae,0xc6,0xd7,0x1c,0x2e,0xda,
	0x2a,0xfb,0x61,0x70,0x2b,0xd1,0xbf,0x8b,
	0x95,0xbc,0x23,0xb6,0x4b,0x60,0x02,0xec,
#  else
#    error crypto_core_ROUNDS must be 8, 12, or 20.
#  endif
#else
#  error Byte order must be little-endian or big-endian.
#endif
};

static int
crypto_core_selftest(void)
{
	const uint8_t nonce[crypto_core_INPUTBYTES] = {0};
	const uint8_t key[crypto_core_KEYBYTES] = {0};
	uint8_t block[64];
	unsigned i;

	crypto_core(block, nonce, key, crypto_core_constant32);
	for (i = 0; i < 64; i++) {
		if (block[i] != crypto_core_selftest_vector[i])
			return EIO;
	}

	return 0;
}

/* PRNG */

/*
 * For a state s, rather than use ChaCha20 as a stream cipher to
 * generate the concatenation ChaCha20_s(0) || ChaCha20_s(1) || ..., we
 * split ChaCha20_s(0) into s' || x and yield x for the first request,
 * split ChaCha20_s'(0) into s'' || y and yield y for the second
 * request, &c.  This provides backtracking resistance: an attacker who
 * finds s'' can't recover s' or x.
 */
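
/*
 * Concretely, each request below consumes one 64-byte ChaCha block:
 * the first crypto_prng_SEEDBYTES (32) bytes become the next state and
 * up to crypto_prng_MAXOUTPUTBYTES (32) bytes go to the caller, after
 * which the old state is gone -- the "fast key erasure" construction.
 */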

#define	crypto_prng_SEEDBYTES		crypto_core_KEYBYTES
#define	crypto_prng_MAXOUTPUTBYTES	\
	(crypto_core_OUTPUTBYTES - crypto_prng_SEEDBYTES)

__CTASSERT(sizeof(struct crypto_prng) == crypto_prng_SEEDBYTES);

static void
crypto_prng_seed(struct crypto_prng *prng, const void *seed)
{

	(void)memcpy(prng->state, seed, crypto_prng_SEEDBYTES);
}

static void
crypto_prng_buf(struct crypto_prng *prng, void *buf, size_t n)
{
	const uint8_t nonce[crypto_core_INPUTBYTES] = {0};
	uint8_t output[crypto_core_OUTPUTBYTES];

	_DIAGASSERT(n <= crypto_prng_MAXOUTPUTBYTES);
	__CTASSERT(sizeof prng->state + crypto_prng_MAXOUTPUTBYTES
	    <= sizeof output);

	crypto_core(output, nonce, prng->state, crypto_core_constant32);
	(void)memcpy(prng->state, output, sizeof prng->state);
	(void)memcpy(buf, output + sizeof prng->state, n);
	(void)explicit_memset(output, 0, sizeof output);
}

/* One-time stream: expand short single-use secret into long secret */

#define	crypto_onetimestream_SEEDBYTES	crypto_core_KEYBYTES

static void
crypto_onetimestream(const void *seed, void *buf, size_t n)
{
	uint32_t nonce[crypto_core_INPUTBYTES / sizeof(uint32_t)] = {0};
	uint8_t block[crypto_core_OUTPUTBYTES];
	uint8_t *p8, *p32;
	const uint8_t *nonce8 = (const uint8_t *)(void *)nonce;
	size_t ni, nb, nf;
	/*
	 * Guarantee we can generate up to n bytes.  We have
	 * 2^(8*INPUTBYTES) possible inputs yielding output of
	 * OUTPUTBYTES*2^(8*INPUTBYTES) bytes.  Since n < 2^(CHAR_BIT *
	 * sizeof n), it suffices to require that CHAR_BIT * sizeof n
	 * be at most log_2 of the total output stream length.  We have
	 *
	 *	log_2 (o 2^(8 i)) = log_2 o + log_2 2^(8 i)
	 *	  = log_2 o + 8 i.
	 */
#ifndef __lint__
	__CTASSERT(CHAR_BIT * sizeof n <= (ilog2(crypto_core_OUTPUTBYTES) +
		8 * crypto_core_INPUTBYTES));
#endif

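	/*
	 * Split the request into an unaligned head of ni (< 4) bytes
	 * copied out of a scratch block, nb full 64-byte blocks
	 * generated straight into the 4-byte-aligned remainder of the
	 * buffer, and a tail of nf (< 64) bytes, advancing the nonce
	 * once per block generated.
	 */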
	p8 = buf;
	p32 = (uint8_t *)roundup2((uintptr_t)p8, 4);
	ni = p32 - p8;
	if (n < ni)
		ni = n;
	nb = (n - ni) / sizeof block;
	nf = (n - ni) % sizeof block;

	_DIAGASSERT(((uintptr_t)p32 & 3) == 0);
	_DIAGASSERT(ni <= n);
	_DIAGASSERT(nb <= (n / sizeof block));
	_DIAGASSERT(nf <= n);
	_DIAGASSERT(n == (ni + (nb * sizeof block) + nf));
	_DIAGASSERT(ni < 4);
	_DIAGASSERT(nf < sizeof block);

	if (ni) {
		crypto_core(block, nonce8, seed, crypto_core_constant32);
		nonce[0]++;
		(void)memcpy(p8, block, ni);
	}
	while (nb--) {
		crypto_core(p32, nonce8, seed, crypto_core_constant32);
		if (++nonce[0] == 0)
			nonce[1]++;
		p32 += crypto_core_OUTPUTBYTES;
	}
	if (nf) {
		crypto_core(block, nonce8, seed, crypto_core_constant32);
		if (++nonce[0] == 0)
			nonce[1]++;
		(void)memcpy(p32, block, nf);
	}

	if (ni | nf)
		(void)explicit_memset(block, 0, sizeof block);
}

/*
 * entropy_epoch()
 *
 *	Return the current entropy epoch, from the sysctl node
 *	kern.entropy.epoch.
 *
 *	The entropy epoch is never zero.  Initially, or on error, it is
 *	(unsigned)-1.  It may wrap around but it skips (unsigned)-1 and
 *	0 when it does.  Changes happen less than once per second, so
 *	wraparound will only affect systems after 136 years of uptime.
 *
 *	XXX This should get it from a page shared read-only by kernel
 *	with userland, but until we implement such a mechanism, this
 *	sysctl -- incurring the cost of a syscall -- will have to
 *	serve.
 */
static unsigned
entropy_epoch(void)
{
	static atomic_int mib0[3];
	static atomic_bool initialized = false;
	int mib[3];
	unsigned epoch = (unsigned)-1;
	size_t epochlen = sizeof(epoch);

	/*
	 * Resolve kern.entropy.epoch if we haven't already.  Cache it
	 * for the next caller.  Initialization is idempotent, so it's
	 * OK if two threads do it at once.
	 */
	if (atomic_load_explicit(&initialized, memory_order_acquire)) {
		mib[0] = atomic_load_explicit(&mib0[0], memory_order_relaxed);
		mib[1] = atomic_load_explicit(&mib0[1], memory_order_relaxed);
		mib[2] = atomic_load_explicit(&mib0[2], memory_order_relaxed);
	} else {
		size_t nmib = __arraycount(mib);

		if (sysctlnametomib("kern.entropy.epoch", mib, &nmib) == -1)
			return (unsigned)-1;
		if (nmib != __arraycount(mib))
			return (unsigned)-1;
		atomic_store_explicit(&mib0[0], mib[0], memory_order_relaxed);
		atomic_store_explicit(&mib0[1], mib[1], memory_order_relaxed);
		atomic_store_explicit(&mib0[2], mib[2], memory_order_relaxed);
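		/*
		 * The release store of initialized below pairs with the
		 * acquire load above, so a thread that observes it as
		 * true also observes the cached mib0[] values.
		 */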
		atomic_store_explicit(&initialized, true,
		    memory_order_release);
	}

	if (sysctl(mib, __arraycount(mib), &epoch, &epochlen, NULL, 0) == -1)
		return (unsigned)-1;
	if (epochlen != sizeof(epoch))
		return (unsigned)-1;

	return epoch;
}

/* arc4random state: per-thread, per-process (zeroed in child on fork) */

static void
arc4random_prng_addrandom(struct arc4random_prng *prng, const void *data,
    size_t datalen)
{
	const int mib[] = { CTL_KERN, KERN_ARND };
	SHA256_CTX ctx;
	uint8_t buf[crypto_prng_SEEDBYTES];
	size_t buflen = sizeof buf;
	unsigned epoch = entropy_epoch();

	__CTASSERT(sizeof buf == SHA256_DIGEST_LENGTH);

	SHA256_Init(&ctx);

	crypto_prng_buf(&prng->arc4_prng, buf, sizeof buf);
	SHA256_Update(&ctx, buf, sizeof buf);

	if (sysctl(mib, (u_int)__arraycount(mib), buf, &buflen, NULL, 0) == -1)
		abort();
	if (buflen != sizeof buf)
		abort();
	SHA256_Update(&ctx, buf, sizeof buf);

	if (data != NULL)
		SHA256_Update(&ctx, data, datalen);

	SHA256_Final(buf, &ctx);
	(void)explicit_memset(&ctx, 0, sizeof ctx);

	/* reseed(SHA256(prng() || sysctl(KERN_ARND) || data)) */
	crypto_prng_seed(&prng->arc4_prng, buf);
	(void)explicit_memset(buf, 0, sizeof buf);
	prng->arc4_epoch = epoch;
}

#ifdef _REENTRANT
static struct arc4random_prng *
arc4random_prng_create(void)
{
	struct arc4random_prng *prng;
	const size_t size = roundup(sizeof(*prng), sysconf(_SC_PAGESIZE));

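	/*
	 * The state gets its own page(s) so that minherit(2) with
	 * MAP_INHERIT_ZERO can zero it in the child of a fork; a zeroed
	 * arc4_epoch never matches the entropy epoch, which forces a
	 * reseed before the child generates any output.
	 */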
	prng = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1,
	    0);
	if (prng == MAP_FAILED)
		goto fail0;
	if (minherit(prng, size, MAP_INHERIT_ZERO) == -1)
		goto fail1;

	return prng;

fail1:	(void)munmap(prng, size);
fail0:	return NULL;
}
#endif

#ifdef _REENTRANT
static void
arc4random_prng_destroy(struct arc4random_prng *prng)
{
	const size_t size = roundup(sizeof(*prng), sysconf(_SC_PAGESIZE));

	(void)explicit_memset(prng, 0, sizeof(*prng));
	(void)munmap(prng, size);
}
#endif

/* Library state */

struct arc4random_global_state arc4random_global = {
#ifdef _REENTRANT
	.lock		= MUTEX_INITIALIZER,
#endif
	.once		= ONCE_INITIALIZER,
};

static void
arc4random_atfork_prepare(void)
{

	mutex_lock(&arc4random_global.lock);
	(void)explicit_memset(&arc4random_global.prng, 0,
	    sizeof arc4random_global.prng);
}

static void
arc4random_atfork_parent(void)
{

	mutex_unlock(&arc4random_global.lock);
}

static void
arc4random_atfork_child(void)
{

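	/*
	 * The prepare handler already wiped the global PRNG state, and
	 * any per-thread state is zeroed in the child by
	 * MAP_INHERIT_ZERO, so everything reseeds on next use; all that
	 * is left is to drop the lock.
	 */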
	mutex_unlock(&arc4random_global.lock);
}

#ifdef _REENTRANT
static void
arc4random_tsd_destructor(void *p)
{
	struct arc4random_prng *const prng = p;

	arc4random_prng_destroy(prng);
}
#endif

static void
arc4random_initialize(void)
{

	/*
	 * If the crypto software is broken, abort -- something is
	 * severely wrong with this process image.
	 */
	if (crypto_core_selftest() != 0)
		abort();

	/*
	 * Set up a pthread_atfork handler to lock the global state
	 * around fork so that if forked children can't use the
	 * per-thread state, they can take the lock and use the global
	 * state without deadlock.
	 */
	if (pthread_atfork(&arc4random_atfork_prepare,
		&arc4random_atfork_parent, &arc4random_atfork_child)
	    != 0)
		abort();

	/*
	 * For multithreaded builds, try to allocate a per-thread PRNG
	 * state to avoid contention due to arc4random.
	 */
#ifdef _REENTRANT
	if (thr_keycreate(&arc4random_global.thread_key,
		&arc4random_tsd_destructor) == 0)
		arc4random_global.per_thread = true;
#endif

	/*
	 * Note that the arc4random library state has been initialized
	 * for the sake of automatic tests.
	 */
	arc4random_global.initialized = true;
}

static struct arc4random_prng *
arc4random_prng_get(void)
{
	struct arc4random_prng *prng = NULL;

	/* Make sure the library is initialized.  */
	thr_once(&arc4random_global.once, &arc4random_initialize);

#ifdef _REENTRANT
	/* Get or create the per-thread PRNG state.  */
	prng = __predict_true(arc4random_global.per_thread)
	    ? thr_getspecific(arc4random_global.thread_key)
	    : NULL;
	if (__predict_false(prng == NULL) && arc4random_global.per_thread) {
		prng = arc4random_prng_create();
		thr_setspecific(arc4random_global.thread_key, prng);
	}
#endif

	/* If we can't create it, fall back to the global PRNG.  */
	if (__predict_false(prng == NULL)) {
		mutex_lock(&arc4random_global.lock);
		prng = &arc4random_global.prng;
	}

	/* Guarantee the PRNG is seeded.  */
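	/*
	 * Reseed whenever the kernel's entropy epoch has changed, and
	 * after fork, since the zeroed arc4_epoch can never match a
	 * real epoch.
	 */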
	if (__predict_false(prng->arc4_epoch != entropy_epoch()))
		arc4random_prng_addrandom(prng, NULL, 0);

	return prng;
}

static void
arc4random_prng_put(struct arc4random_prng *prng)
{

	/* If we had fallen back to the global PRNG, unlock it.  */
	if (__predict_false(prng == &arc4random_global.prng))
		mutex_unlock(&arc4random_global.lock);
}

/* Public API */

uint32_t
arc4random(void)
{
	struct arc4random_prng *prng;
	uint32_t v;

	prng = arc4random_prng_get();
	crypto_prng_buf(&prng->arc4_prng, &v, sizeof v);
	arc4random_prng_put(prng);

	return v;
}

void
arc4random_buf(void *buf, size_t len)
{
	struct arc4random_prng *prng;

	if (len <= crypto_prng_MAXOUTPUTBYTES) {
		prng = arc4random_prng_get();
		crypto_prng_buf(&prng->arc4_prng, buf, len);
		arc4random_prng_put(prng);
	} else {
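		/*
		 * A single PRNG request yields at most
		 * crypto_prng_MAXOUTPUTBYTES, so for larger requests
		 * draw a one-time 32-byte seed instead and expand it
		 * into the caller's buffer with its own ChaCha stream.
		 */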
		uint8_t seed[crypto_onetimestream_SEEDBYTES];

		prng = arc4random_prng_get();
		crypto_prng_buf(&prng->arc4_prng, seed, sizeof seed);
		arc4random_prng_put(prng);

		crypto_onetimestream(seed, buf, len);
		(void)explicit_memset(seed, 0, sizeof seed);
	}
}

uint32_t
arc4random_uniform(uint32_t bound)
{
	struct arc4random_prng *prng;
	uint32_t minimum, r;

	/*
	 * We want a uniform random choice in [0, n), and arc4random()
	 * makes a uniform random choice in [0, 2^32).  If we reduce
	 * that modulo n, values in [0, 2^32 mod n) will be represented
	 * slightly more than values in [2^32 mod n, n).  Instead we
	 * choose only from [2^32 mod n, 2^32) by rejecting samples in
	 * [0, 2^32 mod n), to avoid counting the extra representative
	 * of [0, 2^32 mod n).  To compute 2^32 mod n, note that
	 *
	 *	2^32 mod n = 2^32 mod n - 0
	 *	  = 2^32 mod n - n mod n
	 *	  = (2^32 - n) mod n,
	 *
	 * the last of which is what we compute in 32-bit arithmetic.
	 */
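	/*
	 * For example, with bound = 6: 2^32 mod 6 = 4, so minimum = 4,
	 * samples in [0, 4) are rejected, and each residue is hit by
	 * exactly (2^32 - 4)/6 accepted samples.  Note that bound == 0
	 * is not handled and would divide by zero below.
	 */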
	minimum = (-bound % bound);

	prng = arc4random_prng_get();
	do crypto_prng_buf(&prng->arc4_prng, &r, sizeof r);
	while (__predict_false(r < minimum));
	arc4random_prng_put(prng);

	return (r % bound);
}

void
arc4random_stir(void)
{
	struct arc4random_prng *prng;

	prng = arc4random_prng_get();
	arc4random_prng_addrandom(prng, NULL, 0);
	arc4random_prng_put(prng);
}

/*
 * Silly signature here is for hysterical raisins.  Should instead be
 * const void *data and size_t datalen.
 */
void
arc4random_addrandom(u_char *data, int datalen)
{
	struct arc4random_prng *prng;

	_DIAGASSERT(0 <= datalen);

	prng = arc4random_prng_get();
	arc4random_prng_addrandom(prng, data, datalen);
	arc4random_prng_put(prng);
}

#ifdef _ARC4RANDOM_TEST

#include <sys/wait.h>

#include <err.h>
#include <inttypes.h>
#include <stdio.h>

int
main(int argc __unused, char **argv __unused)
{
	unsigned char gubbish[] = "random gubbish";
	const uint8_t zero64[64] = {0};
	uint8_t buf[2048];
	unsigned i, a, n;

	/* Test arc4random: should not be deterministic.  */
	if (printf("arc4random: %08"PRIx32"\n", arc4random()) < 0)
		err(1, "printf");

	/* Test stirring: should definitely not be deterministic.  */
	arc4random_stir();

	/* Test small buffer.  */
	arc4random_buf(buf, 8);
	if (printf("arc4randombuf small:") < 0)
		err(1, "printf");
	for (i = 0; i < 8; i++)
		if (printf(" %02x", buf[i]) < 0)
			err(1, "printf");
	if (printf("\n") < 0)
		err(1, "printf");

	/* Test addrandom: should not make the rest deterministic.  */
	arc4random_addrandom(gubbish, sizeof gubbish);

	/* Test large buffer.  */
	arc4random_buf(buf, sizeof buf);
	if (printf("arc4randombuf_large:") < 0)
		err(1, "printf");
	for (i = 0; i < sizeof buf; i++)
		if (printf(" %02x", buf[i]) < 0)
			err(1, "printf");
	if (printf("\n") < 0)
		err(1, "printf");

	/* Test misaligned small and large.  */
	for (a = 0; a < 64; a++) {
		for (n = a; n < sizeof buf; n++) {
			(void)memset(buf, 0, sizeof buf);
			arc4random_buf(buf, n - a);
			if (memcmp(buf + n - a, zero64, a) != 0)
				errx(1, "arc4random buffer overflow 0");

			(void)memset(buf, 0, sizeof buf);
			arc4random_buf(buf + a, n - a);
			if (memcmp(buf, zero64, a) != 0)
				errx(1, "arc4random buffer overflow 1");

			if ((2*a) <= n) {
				(void)memset(buf, 0, sizeof buf);
				arc4random_buf(buf + a, n - a - a);
				if (memcmp(buf + n - a, zero64, a) != 0)
					errx(1,
					    "arc4random buffer overflow 2");
			}
		}
	}

	/* Test fork-safety.  */
    {
	pid_t pid, rpid;
	int status;

	pid = fork();
	switch (pid) {
	case -1:
		err(1, "fork");
	case 0: {
		/*
		 * Verify the epoch has been set to zero by fork.
		 */
		struct arc4random_prng *prng = NULL;
#ifdef _REENTRANT
		prng = thr_getspecific(arc4random_global.thread_key);
#endif
		if (prng == NULL)
			prng = &arc4random_global.prng;
		_exit(prng->arc4_epoch != 0);
	}
	default:
		rpid = waitpid(pid, &status, 0);
		if (rpid == -1)
			err(1, "waitpid");
		if (rpid != pid)
			errx(1, "waitpid returned wrong pid"
			    ": %"PRIdMAX" != %"PRIdMAX,
			    (intmax_t)rpid,
			    (intmax_t)pid);
		if (WIFEXITED(status)) {
			if (WEXITSTATUS(status) != 0)
				errx(1, "child exited with %d",
				    WEXITSTATUS(status));
		} else if (WIFSIGNALED(status)) {
			errx(1, "child terminated on signal %d",
			    WTERMSIG(status));
		} else {
			errx(1, "child died mysteriously: %d", status);
		}
	}
    }

	/* XXX Test multithreaded fork safety...?  */

	return 0;
}
#endif