arc4random.c revision 1.46 1 /* $NetBSD: arc4random.c,v 1.46 2025/03/09 18:11:55 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Legacy arc4random(3) API from OpenBSD reimplemented using the
34 * ChaCha20 PRF, with per-thread state.
35 *
36 * Security model:
37 * - An attacker who sees some outputs cannot predict past or future
38 * outputs.
39 * - An attacker who sees the PRNG state cannot predict past outputs.
40 * - An attacker who sees a child's PRNG state cannot predict past or
41 * future outputs in the parent, or in other children.
42 *
43 * The arc4random(3) API may abort the process if:
44 *
45 * (a) the crypto self-test fails, or
46 * (b) sysctl(KERN_ARND) fails when reseeding the PRNG.
47 *
48 * The crypto self-test occurs only once, on the first use of any of
49 * the arc4random(3) API. KERN_ARND is unlikely to fail later unless
50 * the kernel is seriously broken.
51 */
52
53 #include <sys/cdefs.h>
54 __RCSID("$NetBSD: arc4random.c,v 1.46 2025/03/09 18:11:55 riastradh Exp $");
55
56 #include "namespace.h"
57 #include "reentrant.h"
58
59 #include <sys/bitops.h>
60 #include <sys/endian.h>
61 #include <sys/errno.h>
62 #include <sys/mman.h>
63 #include <sys/sysctl.h>
64
65 #include <assert.h>
66 #include <sha2.h>
67 #include <stdatomic.h>
68 #include <stdbool.h>
69 #include <stdint.h>
70 #include <stdlib.h>
71 #include <string.h>
72 #include <unistd.h>
73
74 #include "arc4random.h"
75 #include "reentrant.h"
76
77 #ifdef __weak_alias
78 __weak_alias(arc4random,_arc4random)
79 __weak_alias(arc4random_addrandom,_arc4random_addrandom)
80 __weak_alias(arc4random_buf,_arc4random_buf)
81 __weak_alias(arc4random_stir,_arc4random_stir)
82 __weak_alias(arc4random_uniform,_arc4random_uniform)
83 #endif
84
85 /*
86 * For standard ChaCha, use le32dec/le32enc. We don't need that for
87 * the purposes of a nondeterministic random number generator -- we
88 * don't need to be bit-for-bit compatible over any wire.
89 */
90
/*
 * crypto_le32dec(p)
 *
 *	Load a 32-bit word from p in the machine's native byte order.
 *	(Named le32dec for parity with standard ChaCha, but this PRNG
 *	has no wire format -- see the note above -- so native order is
 *	fine and avoids byte-swapping on big-endian machines.)
 */
static inline uint32_t
crypto_le32dec(const void *p)
{
	uint32_t word;

	memcpy(&word, p, sizeof word);
	return word;
}
100
/*
 * crypto_le32enc(p, v)
 *
 *	Store the 32-bit word v at p in the machine's native byte
 *	order.  Uses memcpy so p need not be aligned.
 */
static inline void
crypto_le32enc(void *p, uint32_t v)
{
	memcpy(p, &v, sizeof v);
}
107
108 /* ChaCha core */
109
110 #define crypto_core_OUTPUTBYTES 64
111 #define crypto_core_INPUTBYTES 16
112 #define crypto_core_KEYBYTES 32
113 #define crypto_core_CONSTBYTES 16
114
115 #define crypto_core_ROUNDS 20
116
/*
 * rotate(u, c)
 *
 *	Rotate the 32-bit word u left by c bits.  Written with masked
 *	shift counts so every count, including c == 0, is well-defined:
 *	the previous form computed u >> (32 - c), and a shift by 32 is
 *	undefined behavior in C (C11 6.5.7p3).  Current callers pass
 *	only 7, 8, 12, and 16, for which the result is unchanged.
 *	Compilers recognize this idiom and emit a single rotate
 *	instruction.
 */
static uint32_t
rotate(uint32_t u, unsigned c)
{

	return (u << (c & 31)) | (u >> (-c & 31));
}
123
124 #define QUARTERROUND(a, b, c, d) do { \
125 (a) += (b); (d) ^= (a); (d) = rotate((d), 16); \
126 (c) += (d); (b) ^= (c); (b) = rotate((b), 12); \
127 (a) += (b); (d) ^= (a); (d) = rotate((d), 8); \
128 (c) += (d); (b) ^= (c); (b) = rotate((b), 7); \
129 } while (0)
130
131 static const uint8_t crypto_core_constant32[16] = "expand 32-byte k";
132
133 static void
134 crypto_core(uint8_t *out, const uint8_t *in, const uint8_t *k,
135 const uint8_t *c)
136 {
137 uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15;
138 uint32_t j0,j1,j2,j3,j4,j5,j6,j7,j8,j9,j10,j11,j12,j13,j14,j15;
139 int i;
140
141 j0 = x0 = crypto_le32dec(c + 0);
142 j1 = x1 = crypto_le32dec(c + 4);
143 j2 = x2 = crypto_le32dec(c + 8);
144 j3 = x3 = crypto_le32dec(c + 12);
145 j4 = x4 = crypto_le32dec(k + 0);
146 j5 = x5 = crypto_le32dec(k + 4);
147 j6 = x6 = crypto_le32dec(k + 8);
148 j7 = x7 = crypto_le32dec(k + 12);
149 j8 = x8 = crypto_le32dec(k + 16);
150 j9 = x9 = crypto_le32dec(k + 20);
151 j10 = x10 = crypto_le32dec(k + 24);
152 j11 = x11 = crypto_le32dec(k + 28);
153 j12 = x12 = crypto_le32dec(in + 0);
154 j13 = x13 = crypto_le32dec(in + 4);
155 j14 = x14 = crypto_le32dec(in + 8);
156 j15 = x15 = crypto_le32dec(in + 12);
157
158 for (i = crypto_core_ROUNDS; i > 0; i -= 2) {
159 QUARTERROUND( x0, x4, x8,x12);
160 QUARTERROUND( x1, x5, x9,x13);
161 QUARTERROUND( x2, x6,x10,x14);
162 QUARTERROUND( x3, x7,x11,x15);
163 QUARTERROUND( x0, x5,x10,x15);
164 QUARTERROUND( x1, x6,x11,x12);
165 QUARTERROUND( x2, x7, x8,x13);
166 QUARTERROUND( x3, x4, x9,x14);
167 }
168
169 crypto_le32enc(out + 0, x0 + j0);
170 crypto_le32enc(out + 4, x1 + j1);
171 crypto_le32enc(out + 8, x2 + j2);
172 crypto_le32enc(out + 12, x3 + j3);
173 crypto_le32enc(out + 16, x4 + j4);
174 crypto_le32enc(out + 20, x5 + j5);
175 crypto_le32enc(out + 24, x6 + j6);
176 crypto_le32enc(out + 28, x7 + j7);
177 crypto_le32enc(out + 32, x8 + j8);
178 crypto_le32enc(out + 36, x9 + j9);
179 crypto_le32enc(out + 40, x10 + j10);
180 crypto_le32enc(out + 44, x11 + j11);
181 crypto_le32enc(out + 48, x12 + j12);
182 crypto_le32enc(out + 52, x13 + j13);
183 crypto_le32enc(out + 56, x14 + j14);
184 crypto_le32enc(out + 60, x15 + j15);
185 }
186
187 /* ChaCha self-test */
188
189 /*
190 * Test vector for ChaCha20 from
191 * <http://tools.ietf.org/html/draft-strombergson-chacha-test-vectors-00>,
192 * test vectors for ChaCha12 and ChaCha8 and for big-endian machines
193 * generated by the same crypto_core code with crypto_core_ROUNDS and
194 * crypto_le32enc/dec varied.
195 */
196
/*
 * Expected first output block of ChaCha with an all-zeros key and
 * all-zeros input, as produced by crypto_core above.  The variant is
 * selected at compile time to match both crypto_core_ROUNDS and this
 * build's byte order (the word codecs above are native-endian).
 */
static const uint8_t crypto_core_selftest_vector[64] = {
#if _BYTE_ORDER == _LITTLE_ENDIAN
# if crypto_core_ROUNDS == 8
	/* ChaCha8, little-endian */
	0x3e,0x00,0xef,0x2f,0x89,0x5f,0x40,0xd6,
	0x7f,0x5b,0xb8,0xe8,0x1f,0x09,0xa5,0xa1,
	0x2c,0x84,0x0e,0xc3,0xce,0x9a,0x7f,0x3b,
	0x18,0x1b,0xe1,0x88,0xef,0x71,0x1a,0x1e,
	0x98,0x4c,0xe1,0x72,0xb9,0x21,0x6f,0x41,
	0x9f,0x44,0x53,0x67,0x45,0x6d,0x56,0x19,
	0x31,0x4a,0x42,0xa3,0xda,0x86,0xb0,0x01,
	0x38,0x7b,0xfd,0xb8,0x0e,0x0c,0xfe,0x42,
# elif crypto_core_ROUNDS == 12
	/* ChaCha12, little-endian */
	0x9b,0xf4,0x9a,0x6a,0x07,0x55,0xf9,0x53,
	0x81,0x1f,0xce,0x12,0x5f,0x26,0x83,0xd5,
	0x04,0x29,0xc3,0xbb,0x49,0xe0,0x74,0x14,
	0x7e,0x00,0x89,0xa5,0x2e,0xae,0x15,0x5f,
	0x05,0x64,0xf8,0x79,0xd2,0x7a,0xe3,0xc0,
	0x2c,0xe8,0x28,0x34,0xac,0xfa,0x8c,0x79,
	0x3a,0x62,0x9f,0x2c,0xa0,0xde,0x69,0x19,
	0x61,0x0b,0xe8,0x2f,0x41,0x13,0x26,0xbe,
# elif crypto_core_ROUNDS == 20
	/* ChaCha20, little-endian (IETF draft test vector) */
	0x76,0xb8,0xe0,0xad,0xa0,0xf1,0x3d,0x90,
	0x40,0x5d,0x6a,0xe5,0x53,0x86,0xbd,0x28,
	0xbd,0xd2,0x19,0xb8,0xa0,0x8d,0xed,0x1a,
	0xa8,0x36,0xef,0xcc,0x8b,0x77,0x0d,0xc7,
	0xda,0x41,0x59,0x7c,0x51,0x57,0x48,0x8d,
	0x77,0x24,0xe0,0x3f,0xb8,0xd8,0x4a,0x37,
	0x6a,0x43,0xb8,0xf4,0x15,0x18,0xa1,0x1c,
	0xc3,0x87,0xb6,0x69,0xb2,0xee,0x65,0x86,
# else
#  error crypto_core_ROUNDS must be 8, 12, or 20.
# endif
#elif _BYTE_ORDER == _BIG_ENDIAN
# if crypto_core_ROUNDS == 8
	/* ChaCha8, big-endian */
	0x9a,0x13,0x07,0xe3,0x38,0x18,0x9e,0x99,
	0x15,0x37,0x16,0x4d,0x04,0xe6,0x48,0x9a,
	0x07,0xd6,0xe8,0x7a,0x02,0xf9,0xf5,0xc7,
	0x3f,0xa9,0xc2,0x0a,0xe1,0xc6,0x62,0xea,
	0x80,0xaf,0xb6,0x51,0xca,0x52,0x43,0x87,
	0xe3,0xa6,0xa6,0x61,0x11,0xf5,0xe6,0xcf,
	0x09,0x0f,0xdc,0x9d,0xc3,0xc3,0xbb,0x43,
	0xd7,0xfa,0x70,0x42,0xbf,0xa5,0xee,0xa2,
# elif crypto_core_ROUNDS == 12
	/* ChaCha12, big-endian */
	0xcf,0x6c,0x16,0x48,0xbf,0xf4,0xba,0x85,
	0x32,0x69,0xd3,0x98,0xc8,0x7d,0xcd,0x3f,
	0xdc,0x76,0x6b,0xa2,0x7b,0xcb,0x17,0x4d,
	0x05,0xda,0xdd,0xd8,0x62,0x54,0xbf,0xe0,
	0x65,0xed,0x0e,0xf4,0x01,0x7e,0x3c,0x05,
	0x35,0xb2,0x7a,0x60,0xf3,0x8f,0x12,0x33,
	0x24,0x60,0xcd,0x85,0xfe,0x4c,0xf3,0x39,
	0xb1,0x0e,0x3e,0xe0,0xba,0xa6,0x2f,0xa9,
# elif crypto_core_ROUNDS == 20
	/* ChaCha20, big-endian */
	0x83,0x8b,0xf8,0x75,0xf7,0xde,0x9d,0x8c,
	0x33,0x14,0x72,0x28,0xd1,0xbe,0x88,0xe5,
	0x94,0xb5,0xed,0xb8,0x56,0xb5,0x9e,0x0c,
	0x64,0x6a,0xaf,0xd9,0xa7,0x49,0x10,0x59,
	0xba,0x3a,0x82,0xf8,0x4a,0x70,0x9c,0x00,
	0x82,0x2c,0xae,0xc6,0xd7,0x1c,0x2e,0xda,
	0x2a,0xfb,0x61,0x70,0x2b,0xd1,0xbf,0x8b,
	0x95,0xbc,0x23,0xb6,0x4b,0x60,0x02,0xec,
# else
#  error crypto_core_ROUNDS must be 8, 12, or 20.
# endif
#else
# error Byte order must be little-endian or big-endian.
#endif
};
264
265 static int
266 crypto_core_selftest(void)
267 {
268 const uint8_t nonce[crypto_core_INPUTBYTES] = {0};
269 const uint8_t key[crypto_core_KEYBYTES] = {0};
270 uint8_t block[64];
271 unsigned i;
272
273 crypto_core(block, nonce, key, crypto_core_constant32);
274 for (i = 0; i < 64; i++) {
275 if (block[i] != crypto_core_selftest_vector[i])
276 return EIO;
277 }
278
279 return 0;
280 }
281
282 /* PRNG */
283
284 /*
285 * For a state s, rather than use ChaCha20 as a stream cipher to
286 * generate the concatenation ChaCha20_s(0) || ChaCha20_s(1) || ..., we
287 * split ChaCha20_s(0) into s' || x and yield x for the first request,
288 * split ChaCha20_s'(0) into s'' || y and yield y for the second
289 * request, &c. This provides backtracking resistance: an attacker who
290 * finds s'' can't recover s' or x.
291 */
292
293 #define crypto_prng_SEEDBYTES crypto_core_KEYBYTES
294 #define crypto_prng_MAXOUTPUTBYTES \
295 (crypto_core_OUTPUTBYTES - crypto_prng_SEEDBYTES)
296
297 __CTASSERT(sizeof(struct crypto_prng) == crypto_prng_SEEDBYTES);
298
299 static void
300 crypto_prng_seed(struct crypto_prng *prng, const void *seed)
301 {
302
303 (void)memcpy(prng->state, seed, crypto_prng_SEEDBYTES);
304 }
305
306 static void
307 crypto_prng_buf(struct crypto_prng *prng, void *buf, size_t n)
308 {
309 const uint8_t nonce[crypto_core_INPUTBYTES] = {0};
310 uint8_t output[crypto_core_OUTPUTBYTES];
311
312 _DIAGASSERT(n <= crypto_prng_MAXOUTPUTBYTES);
313 __CTASSERT(sizeof prng->state + crypto_prng_MAXOUTPUTBYTES
314 <= sizeof output);
315
316 crypto_core(output, nonce, prng->state, crypto_core_constant32);
317 (void)memcpy(prng->state, output, sizeof prng->state);
318 (void)memcpy(buf, output + sizeof prng->state, n);
319 (void)explicit_memset(output, 0, sizeof output);
320 }
321
322 /* One-time stream: expand short single-use secret into long secret */
323
324 #define crypto_onetimestream_SEEDBYTES crypto_core_KEYBYTES
325
/*
 * crypto_onetimestream(seed, buf, n)
 *
 *	Expand the single-use crypto_onetimestream_SEEDBYTES-byte seed
 *	into n bytes of key stream in buf, using the seed as a ChaCha
 *	key and a counting nonce.  The seed must never be reused.
 */
static void
crypto_onetimestream(const void *seed, void *buf, size_t n)
{
	uint32_t nonce[crypto_core_INPUTBYTES / sizeof(uint32_t)] = {0};
	uint8_t block[crypto_core_OUTPUTBYTES];
	uint8_t *p8, *p32;	/* head pointer; 4-byte-aligned body pointer */
	const uint8_t *nonce8 = (const uint8_t *)(void *)nonce;
	size_t ni, nb, nf;	/* head bytes, whole blocks, tail bytes */

	/*
	 * Guarantee we can generate up to n bytes.  We have
	 * 2^(8*INPUTBYTES) possible inputs yielding output of
	 * OUTPUTBYTES*2^(8*INPUTBYTES) bytes.  It suffices to require
	 * that sizeof n > (1/CHAR_BIT) log_2 n be less than
	 * (1/CHAR_BIT) log_2 of the total output stream length.  We
	 * have
	 *
	 *	log_2 (o 2^(8 i)) = log_2 o + log_2 2^(8 i)
	 *	  = log_2 o + 8 i.
	 */
#ifndef __lint__
	__CTASSERT(CHAR_BIT * sizeof n <= (ilog2(crypto_core_OUTPUTBYTES) +
	    8 * crypto_core_INPUTBYTES));
#endif

	/*
	 * Split the request into an unaligned head of ni < 4 bytes, nb
	 * whole blocks written directly into the 4-byte-aligned middle
	 * of buf (presumably so the word stores land aligned -- TODO
	 * confirm; crypto_le32enc uses memcpy, so this is an
	 * optimization, not a correctness requirement), and a tail of
	 * nf < sizeof block bytes.
	 */
	p8 = buf;
	p32 = (uint8_t *)roundup2((uintptr_t)p8, 4);
	ni = p32 - p8;
	if (n < ni)
		ni = n;
	nb = (n - ni) / sizeof block;
	nf = (n - ni) % sizeof block;

	_DIAGASSERT(((uintptr_t)p32 & 3) == 0);
	_DIAGASSERT(ni <= n);
	_DIAGASSERT(nb <= (n / sizeof block));
	_DIAGASSERT(nf <= n);
	_DIAGASSERT(n == (ni + (nb * sizeof block) + nf));
	_DIAGASSERT(ni < 4);
	_DIAGASSERT(nf < sizeof block);

	/* Head: generate a block into scratch, copy out ni bytes. */
	if (ni) {
		crypto_core(block, nonce8, seed, crypto_core_constant32);
		/* No carry needed: this is the first block, nonce was 0. */
		nonce[0]++;
		(void)memcpy(p8, block, ni);
	}
	/* Body: write whole blocks in place, carrying into nonce[1]. */
	while (nb--) {
		crypto_core(p32, nonce8, seed, crypto_core_constant32);
		if (++nonce[0] == 0)
			nonce[1]++;
		p32 += crypto_core_OUTPUTBYTES;
	}
	/* Tail: generate into scratch, copy out the final nf bytes. */
	if (nf) {
		crypto_core(block, nonce8, seed, crypto_core_constant32);
		if (++nonce[0] == 0)
			nonce[1]++;
		(void)memcpy(p32, block, nf);
	}

	/* Scrub the scratch block if it ever held key stream. */
	if (ni | nf)
		(void)explicit_memset(block, 0, sizeof block);
}
388
389 /*
390 * entropy_epoch()
391 *
392 * Return the current entropy epoch, from the sysctl node
393 * kern.entropy.epoch.
394 *
395 * The entropy epoch is never zero. Initially, or on error, it is
396 * (unsigned)-1. It may wrap around but it skips (unsigned)-1 and
397 * 0 when it does. Changes happen less than once per second, so
398 * wraparound will only affect systems after 136 years of uptime.
399 *
400 * XXX This should get it from a page shared read-only by kernel
401 * with userland, but until we implement such a mechanism, this
402 * sysctl -- incurring the cost of a syscall -- will have to
403 * serve.
404 */
static unsigned
entropy_epoch(void)
{
	static atomic_int mib0[3];		/* cached resolved MIB */
	static atomic_bool initialized = false;	/* mib0 is valid */
	int mib[3];
	unsigned epoch = (unsigned)-1;		/* error epoch by default */
	size_t epochlen = sizeof(epoch);

	/*
	 * Resolve kern.entropy.epoch if we haven't already.  Cache it
	 * for the next caller.  Initialization is idempotent, so it's
	 * OK if two threads do it at once.
	 */
	if (atomic_load_explicit(&initialized, memory_order_acquire)) {
		/*
		 * The acquire load of `initialized' pairs with the
		 * release store below, so these relaxed loads are
		 * guaranteed to see the fully cached MIB.
		 */
		mib[0] = atomic_load_explicit(&mib0[0], memory_order_relaxed);
		mib[1] = atomic_load_explicit(&mib0[1], memory_order_relaxed);
		mib[2] = atomic_load_explicit(&mib0[2], memory_order_relaxed);
	} else {
		size_t nmib = __arraycount(mib);

		/* On lookup failure, report the error epoch. */
		if (sysctlnametomib("kern.entropy.epoch", mib, &nmib) == -1)
			return (unsigned)-1;
		if (nmib != __arraycount(mib))
			return (unsigned)-1;
		atomic_store_explicit(&mib0[0], mib[0], memory_order_relaxed);
		atomic_store_explicit(&mib0[1], mib[1], memory_order_relaxed);
		atomic_store_explicit(&mib0[2], mib[2], memory_order_relaxed);
		atomic_store_explicit(&initialized, true,
		    memory_order_release);
	}

	/* Query the epoch; map any failure to the error epoch. */
	if (sysctl(mib, __arraycount(mib), &epoch, &epochlen, NULL, 0) == -1)
		return (unsigned)-1;
	if (epochlen != sizeof(epoch))
		return (unsigned)-1;

	return epoch;
}
444
445 /* arc4random state: per-thread, per-process (zeroed in child on fork) */
446
/*
 * arc4random_prng_addrandom(prng, data, datalen)
 *
 *	Reseed prng with SHA256(prng() || sysctl(KERN_ARND) || data),
 *	where data/datalen are optional caller-supplied bytes (data may
 *	be NULL), and record the entropy epoch.  Aborts the process if
 *	KERN_ARND fails -- see the security model at the top of this
 *	file.
 */
static void
arc4random_prng_addrandom(struct arc4random_prng *prng, const void *data,
    size_t datalen)
{
	const int mib[] = { CTL_KERN, KERN_ARND };
	SHA256_CTX ctx;
	uint8_t buf[crypto_prng_SEEDBYTES];
	size_t buflen = sizeof buf;
	/*
	 * Sample the epoch before drawing entropy, so an epoch change
	 * that races with the sysctl below triggers another reseed at
	 * the next use instead of being missed.
	 */
	unsigned epoch = entropy_epoch();

	/* The SHA-256 digest must exactly fill a seed. */
	__CTASSERT(sizeof buf == SHA256_DIGEST_LENGTH);

	SHA256_Init(&ctx);

	/* Mix in the current PRNG output, so no entropy is lost. */
	crypto_prng_buf(&prng->arc4_prng, buf, sizeof buf);
	SHA256_Update(&ctx, buf, sizeof buf);

	/* Mix in fresh entropy from the kernel, or abort. */
	if (sysctl(mib, (u_int)__arraycount(mib), buf, &buflen, NULL, 0) == -1)
		abort();
	if (buflen != sizeof buf)
		abort();
	SHA256_Update(&ctx, buf, sizeof buf);

	/* Mix in the caller's data, if any. */
	if (data != NULL)
		SHA256_Update(&ctx, data, datalen);

	SHA256_Final(buf, &ctx);
	(void)explicit_memset(&ctx, 0, sizeof ctx);

	/* reseed(SHA256(prng() || sysctl(KERN_ARND) || data)) */
	crypto_prng_seed(&prng->arc4_prng, buf);
	(void)explicit_memset(buf, 0, sizeof buf);
	prng->arc4_epoch = epoch;
}
481
482 #ifdef _REENTRANT
483 static struct arc4random_prng *
484 arc4random_prng_create(void)
485 {
486 struct arc4random_prng *prng;
487 const size_t size = roundup(sizeof(*prng), sysconf(_SC_PAGESIZE));
488
489 prng = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1,
490 0);
491 if (prng == MAP_FAILED)
492 goto fail0;
493 if (minherit(prng, size, MAP_INHERIT_ZERO) == -1)
494 goto fail1;
495
496 return prng;
497
498 fail1: (void)munmap(prng, size);
499 fail0: return NULL;
500 }
501 #endif
502
503 #ifdef _REENTRANT
504 static void
505 arc4random_prng_destroy(struct arc4random_prng *prng)
506 {
507 const size_t size = roundup(sizeof(*prng), sysconf(_SC_PAGESIZE));
508
509 (void)explicit_memset(prng, 0, sizeof(*prng));
510 (void)munmap(prng, size);
511 }
512 #endif
513
514 /* Library state */
515
/*
 * Global library state: the once-control for one-time initialization,
 * the global fallback PRNG, and (in multithreaded builds) the lock
 * serializing access to it.
 */
struct arc4random_global_state arc4random_global = {
#ifdef _REENTRANT
	.lock = MUTEX_INITIALIZER,
#endif
	.once = ONCE_INITIALIZER,
};
522
/*
 * Before fork: take the global lock so the global state is quiescent
 * across the fork, and zero the global PRNG so the child cannot
 * inherit secrets from it.  Zeroing arc4_epoch also forces a reseed
 * on next use in both parent and child, since entropy_epoch() never
 * returns zero.
 */
static void
arc4random_atfork_prepare(void)
{

	mutex_lock(&arc4random_global.lock);
	(void)explicit_memset(&arc4random_global.prng, 0,
	    sizeof arc4random_global.prng);
}
531
/* After fork, in the parent: just release the global lock. */
static void
arc4random_atfork_parent(void)
{

	mutex_unlock(&arc4random_global.lock);
}
538
/*
 * After fork, in the child: release the (inherited, held) global
 * lock; the global PRNG state was already wiped in prepare.
 */
static void
arc4random_atfork_child(void)
{

	mutex_unlock(&arc4random_global.lock);
}
545
546 #ifdef _REENTRANT
/*
 * Thread-exit destructor: scrub and free the exiting thread's PRNG
 * state.
 */
static void
arc4random_tsd_destructor(void *p)
{

	arc4random_prng_destroy(p);
}
554 #endif
555
/*
 * arc4random_initialize()
 *
 *	One-time library initialization, run via thr_once on first use
 *	of any arc4random(3) entry point: verify the ChaCha core,
 *	register fork handlers, and create the per-thread state key.
 */
static void
arc4random_initialize(void)
{

	/*
	 * If the crypto software is broken, abort -- something is
	 * severely wrong with this process image.
	 */
	if (crypto_core_selftest() != 0)
		abort();

	/*
	 * Set up a pthread_atfork handler to lock the global state
	 * around fork so that if forked children can't use the
	 * per-thread state, they can take the lock and use the global
	 * state without deadlock.  If this fails, we will fall back to
	 * PRNG state on the stack reinitialized from the kernel
	 * entropy pool at every call.
	 */
	if (pthread_atfork(&arc4random_atfork_prepare,
		&arc4random_atfork_parent, &arc4random_atfork_child)
	    == 0)
		arc4random_global.forksafe = true;

	/*
	 * For multithreaded builds, try to allocate a per-thread PRNG
	 * state to avoid contention due to arc4random.
	 */
#ifdef _REENTRANT
	if (thr_keycreate(&arc4random_global.thread_key,
		&arc4random_tsd_destructor) == 0)
		arc4random_global.per_thread = true;
#endif

	/*
	 * Note that the arc4random library state has been initialized
	 * for the sake of automatic tests.
	 */
	arc4random_global.initialized = true;
}
596
/*
 * arc4random_prng_get(fallback)
 *
 *	Return a pointer to a seeded PRNG for the calling thread:
 *	per-thread state when available, otherwise the global state
 *	with the lock held, otherwise the caller-provided on-stack
 *	fallback seeded from scratch.  Must be paired with
 *	arc4random_prng_put on the same (prng, fallback) pair.
 */
static struct arc4random_prng *
arc4random_prng_get(struct arc4random_prng *fallback)
{
	struct arc4random_prng *prng = NULL;

	/* Make sure the library is initialized. */
	thr_once(&arc4random_global.once, &arc4random_initialize);

#ifdef _REENTRANT
	/* Get or create the per-thread PRNG state. */
	prng = __predict_true(arc4random_global.per_thread)
	    ? thr_getspecific(arc4random_global.thread_key)
	    : NULL;
	if (__predict_false(prng == NULL) && arc4random_global.per_thread) {
		prng = arc4random_prng_create();
		thr_setspecific(arc4random_global.thread_key, prng);
	}
#endif

	/*
	 * If we can't create it, fall back to the global PRNG -- or an
	 * on-stack PRNG, in the unlikely event that pthread_atfork
	 * failed, which we have to seed from scratch each time
	 * (suboptimal, but unlikely, so not worth optimizing).
	 */
	if (__predict_false(prng == NULL)) {
		if (__predict_true(arc4random_global.forksafe)) {
			mutex_lock(&arc4random_global.lock);
			prng = &arc4random_global.prng;
		} else {
			prng = fallback;
			memset(prng, 0, sizeof(*prng));
		}
	}

	/*
	 * Guarantee the PRNG is seeded: reseed if the kernel's entropy
	 * epoch changed since the last seeding, or if this state was
	 * never seeded (arc4_epoch zero, e.g. freshly zeroed by fork
	 * via MAP_INHERIT_ZERO -- entropy_epoch() is never 0).
	 */
	if (__predict_false(prng->arc4_epoch != entropy_epoch()))
		arc4random_prng_addrandom(prng, NULL, 0);

	return prng;
}
638
639 static void
640 arc4random_prng_put(struct arc4random_prng *prng,
641 struct arc4random_prng *fallback)
642 {
643
644 /*
645 * If we had to use a stack fallback, zero it before we return
646 * so that after we return we avoid leaving secrets on the
647 * stack that could recover the parent's future outputs in an
648 * unprivileged forked child (of course, we can't guarantee
649 * that the compiler hasn't spilled anything; this is
650 * best-effort, not a guarantee).
651 */
652 if (__predict_false(prng == fallback))
653 explicit_memset(fallback, 0, sizeof(*fallback));
654
655 /* If we had fallen back to the global PRNG, unlock it. */
656 if (__predict_false(prng == &arc4random_global.prng))
657 mutex_unlock(&arc4random_global.lock);
658 }
659
660 /* Public API */
661
662 uint32_t
663 arc4random(void)
664 {
665 struct arc4random_prng *prng, fallback;
666 uint32_t v;
667
668 prng = arc4random_prng_get(&fallback);
669 crypto_prng_buf(&prng->arc4_prng, &v, sizeof v);
670 arc4random_prng_put(prng, &fallback);
671
672 return v;
673 }
674
675 void
676 arc4random_buf(void *buf, size_t len)
677 {
678 struct arc4random_prng *prng, fallback;
679
680 if (len <= crypto_prng_MAXOUTPUTBYTES) {
681 prng = arc4random_prng_get(&fallback);
682 crypto_prng_buf(&prng->arc4_prng, buf, len);
683 arc4random_prng_put(prng, &fallback);
684 } else {
685 uint8_t seed[crypto_onetimestream_SEEDBYTES];
686
687 prng = arc4random_prng_get(&fallback);
688 crypto_prng_buf(&prng->arc4_prng, seed, sizeof seed);
689 arc4random_prng_put(prng, &fallback);
690
691 crypto_onetimestream(seed, buf, len);
692 (void)explicit_memset(seed, 0, sizeof seed);
693 }
694 }
695
696 uint32_t
697 arc4random_uniform(uint32_t bound)
698 {
699 struct arc4random_prng *prng, fallback;
700 uint32_t minimum, r;
701
702 /*
703 * We want a uniform random choice in [0, n), and arc4random()
704 * makes a uniform random choice in [0, 2^32). If we reduce
705 * that modulo n, values in [0, 2^32 mod n) will be represented
706 * slightly more than values in [2^32 mod n, n). Instead we
707 * choose only from [2^32 mod n, 2^32) by rejecting samples in
708 * [0, 2^32 mod n), to avoid counting the extra representative
709 * of [0, 2^32 mod n). To compute 2^32 mod n, note that
710 *
711 * 2^32 mod n = 2^32 mod n - 0
712 * = 2^32 mod n - n mod n
713 * = (2^32 - n) mod n,
714 *
715 * the last of which is what we compute in 32-bit arithmetic.
716 */
717 minimum = (-bound % bound);
718
719 prng = arc4random_prng_get(&fallback);
720 do crypto_prng_buf(&prng->arc4_prng, &r, sizeof r);
721 while (__predict_false(r < minimum));
722 arc4random_prng_put(prng, &fallback);
723
724 return (r % bound);
725 }
726
727 void
728 arc4random_stir(void)
729 {
730 struct arc4random_prng *prng, fallback;
731
732 prng = arc4random_prng_get(&fallback);
733 arc4random_prng_addrandom(prng, NULL, 0);
734 arc4random_prng_put(prng, &fallback);
735 }
736
737 /*
738 * Silly signature here is for hysterical raisins. Should instead be
739 * const void *data and size_t datalen.
740 */
741 void
742 arc4random_addrandom(u_char *data, int datalen)
743 {
744 struct arc4random_prng *prng, fallback;
745
746 _DIAGASSERT(0 <= datalen);
747
748 prng = arc4random_prng_get(&fallback);
749 arc4random_prng_addrandom(prng, data, datalen);
750 arc4random_prng_put(prng, &fallback);
751 }
752
753 #ifdef _ARC4RANDOM_TEST
754
755 #include <sys/wait.h>
756
757 #include <err.h>
758 #include <stdio.h>
759
/*
 * Smoke test for the arc4random(3) API: exercises each entry point,
 * checks that arc4random_buf writes exactly the requested span at
 * assorted alignments/lengths, and checks that fork zeroes the PRNG
 * epoch (forcing a reseed in the child).  Randomness itself is only
 * eyeballed from the printed output, not asserted.
 */
int
main(int argc __unused, char **argv __unused)
{
	unsigned char gubbish[] = "random gubbish";
	const uint8_t zero64[64] = {0};
	uint8_t buf[2048];
	unsigned i, a, n;

	/* Test arc4random: should not be deterministic. */
	if (printf("arc4random: %08"PRIx32"\n", arc4random()) < 0)
		err(1, "printf");

	/* Test stirring: should definitely not be deterministic. */
	arc4random_stir();

	/* Test small buffer. */
	arc4random_buf(buf, 8);
	if (printf("arc4randombuf small:") < 0)
		err(1, "printf");
	for (i = 0; i < 8; i++)
		if (printf(" %02x", buf[i]) < 0)
			err(1, "printf");
	if (printf("\n") < 0)
		err(1, "printf");

	/* Test addrandom: should not make the rest deterministic. */
	arc4random_addrandom(gubbish, sizeof gubbish);

	/* Test large buffer (> crypto_prng_MAXOUTPUTBYTES path). */
	arc4random_buf(buf, sizeof buf);
	if (printf("arc4randombuf_large:") < 0)
		err(1, "printf");
	for (i = 0; i < sizeof buf; i++)
		if (printf(" %02x", buf[i]) < 0)
			err(1, "printf");
	if (printf("\n") < 0)
		err(1, "printf");

	/*
	 * Test misaligned small and large: a is the alignment offset,
	 * n the span; verify a zeroed guard region of a bytes stays
	 * zero after each fill (zero64 covers a < 64).
	 */
	for (a = 0; a < 64; a++) {
		for (n = a; n < sizeof buf; n++) {
			/* Guard after the filled region. */
			(void)memset(buf, 0, sizeof buf);
			arc4random_buf(buf, n - a);
			if (memcmp(buf + n - a, zero64, a) != 0)
				errx(1, "arc4random buffer overflow 0");

			/* Guard before the filled region. */
			(void)memset(buf, 0, sizeof buf);
			arc4random_buf(buf + a, n - a);
			if (memcmp(buf, zero64, a) != 0)
				errx(1, "arc4random buffer overflow 1");

			/* Guards on both sides. */
			if ((2*a) <= n) {
				(void)memset(buf, 0, sizeof buf);
				arc4random_buf(buf + a, n - a - a);
				if (memcmp(buf + n - a, zero64, a) != 0)
					errx(1,
					    "arc4random buffer overflow 2");
			}
		}
	}

	/* Test fork-safety. */
	{
		pid_t pid, rpid;
		int status;

		pid = fork();
		switch (pid) {
		case -1:
			err(1, "fork");
		case 0: {
			/*
			 * Verify the epoch has been set to zero by fork.
			 */
			struct arc4random_prng *prng = NULL;
#ifdef _REENTRANT
			prng = arc4random_global.per_thread
			    ? thr_getspecific(arc4random_global.thread_key)
			    : NULL;
#endif
			if (prng == NULL)
				prng = &arc4random_global.prng;
			/* Child exit status 0 iff the epoch was zeroed. */
			_exit(prng->arc4_epoch != 0);
		}
		default:
			rpid = waitpid(pid, &status, 0);
			if (rpid == -1)
				err(1, "waitpid");
			if (rpid != pid)
				errx(1, "waitpid returned wrong pid"
				    ": %"PRIdMAX" != %"PRIdMAX,
				    (intmax_t)rpid,
				    (intmax_t)pid);
			if (WIFEXITED(status)) {
				if (WEXITSTATUS(status) != 0)
					errx(1, "child exited with %d",
					    WEXITSTATUS(status));
			} else if (WIFSIGNALED(status)) {
				errx(1, "child terminated on signal %d",
				    WTERMSIG(status));
			} else {
				errx(1, "child died mysteriously: %d", status);
			}
		}
	}

	/* XXX Test multithreaded fork safety...? */

	return 0;
}
870 #endif
871