/* $NetBSD: arc4random.c,v 1.15 2012/08/18 14:42:46 dsl Exp $ */
/* $OpenBSD: arc4random.c,v 1.6 2001/06/05 05:05:38 pvalchev Exp $ */

/*
 * Arc4 random number generator for OpenBSD.
 * Copyright 1996 David Mazieres <dm (at) lcs.mit.edu>.
 *
 * Modification and redistribution in source and binary forms is
 * permitted provided that due credit is given to the author and the
 * OpenBSD project by leaving this copyright notice intact.
 */

/*
 * This code is derived from section 17.1 of Applied Cryptography,
 * second edition, which describes a stream cipher allegedly
 * compatible with RSA Labs "RC4" cipher (the actual description of
 * which is a trade secret).  The same algorithm is used as a stream
 * cipher called "arcfour" in Tatu Ylonen's ssh package.
 *
 * Here the stream cipher has been modified always to include the time
 * when initializing the state.  That makes it impossible to
 * regenerate the same random sequence twice, so this can't be used
 * for encryption, but will generate good random numbers.
 *
 * RC4 is a registered trademark of RSA Laboratories.
 */
27
28 #include <sys/cdefs.h>
29 #if defined(LIBC_SCCS) && !defined(lint)
30 __RCSID("$NetBSD: arc4random.c,v 1.15 2012/08/18 14:42:46 dsl Exp $");
31 #endif /* LIBC_SCCS and not lint */
32
33 #include "namespace.h"
34 #include "reentrant.h"
35 #include <fcntl.h>
36 #include <stdlib.h>
37 #include <unistd.h>
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/time.h>
41 #include <sys/sysctl.h>
42
43 #ifdef __weak_alias
44 __weak_alias(arc4random,_arc4random)
45 #endif
46
#define RSIZE 256

/*
 * RC4 cipher state plus the bookkeeping needed for lazy,
 * process-wide initialization of the single shared stream.
 */
struct arc4_stream {
	mutex_t mtx;		/* serializes access when __isthreaded */
	int initialized;	/* nonzero once arc4_init() has run */
	uint8_t i;		/* RC4 index i */
	uint8_t j;		/* RC4 index j */
	uint8_t s[RSIZE];	/* RC4 permutation of 0..255 */
};

/* XXX lint explodes with an internal error if only mtx is initialized! */
static struct arc4_stream rs = { .i = 0, .mtx = MUTEX_INITIALIZER };

static inline void arc4_addrandom(struct arc4_stream *, u_char *, int);
static void arc4_stir(struct arc4_stream *);
static inline uint8_t arc4_getbyte(struct arc4_stream *);
static inline uint32_t arc4_getword(struct arc4_stream *);
63
64 static __noinline void
65 arc4_init(struct arc4_stream *as)
66 {
67 int n;
68 for (n = 0; n < RSIZE; n++)
69 as->s[n] = n;
70 as->i = 0;
71 as->j = 0;
72
73 as->initialized = 1;
74 arc4_stir(as);
75 }
76
77 static inline int
78 arc4_check_init(struct arc4_stream *as)
79 {
80 if (__predict_true(rs.initialized))
81 return 0;
82
83 arc4_init(as);
84 return 1;
85 }
86
87 static inline void
88 arc4_addrandom(struct arc4_stream *as, u_char *dat, int datlen)
89 {
90 uint8_t si;
91 int n;
92
93 as->i--;
94 for (n = 0; n < RSIZE; n++) {
95 as->i = (as->i + 1);
96 si = as->s[as->i];
97 as->j = (as->j + si + dat[n % datlen]);
98 as->s[as->i] = as->s[as->j];
99 as->s[as->j] = si;
100 }
101 as->j = as->i;
102 }
103
/*
 * Re-key the generator from the kernel's entropy sysctl, then discard
 * early keystream to mask key-schedule biases.
 */
static void
arc4_stir(struct arc4_stream *as)
{
	int seed[32];
	int mib[] = { CTL_KERN, KERN_URND };
	size_t len, n;

	/*
	 * This code once opened and read /dev/urandom on each
	 * call.  That causes repeated rekeying of the kernel stream
	 * generator, which is very wasteful.  Because of application
	 * behavior, caching the fd doesn't really help.  So we just
	 * fill up the tank from sysctl, which is a tiny bit slower
	 * for us but much friendlier to other entropy consumers.
	 */
	for (n = 0; n < __arraycount(seed); n++) {
		len = sizeof(seed[n]);
		if (sysctl(mib, 2, &seed[n], &len, NULL, 0) == -1)
			abort();
	}

	arc4_addrandom(as, (void *)&seed, (int)sizeof(seed));

	/*
	 * Throw away the first N bytes of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 1024 bytes here.)
	 */
	for (n = 0; n < RSIZE * 4; n++)
		(void)arc4_getbyte(as);
}
137
138 static inline uint8_t
139 arc4_getbyte(struct arc4_stream *as)
140 {
141 uint8_t si, sj;
142
143 as->i = (as->i + 1);
144 si = as->s[as->i];
145 as->j = (as->j + si);
146 sj = as->s[as->j];
147 as->s[as->i] = sj;
148 as->s[as->j] = si;
149 return (as->s[(si + sj) & 0xff]);
150 }
151
/*
 * Assemble four keystream bytes into one 32-bit word, most
 * significant byte first.
 */
static inline uint32_t
arc4_getword(struct arc4_stream *as)
{
	uint32_t val;

	/*
	 * Widen before shifting: a uint8_t promotes to (signed) int,
	 * so shifting a byte >= 0x80 left by 24 would overflow int,
	 * which is undefined behavior.
	 */
	val = (uint32_t)arc4_getbyte(as) << 24;
	val |= (uint32_t)arc4_getbyte(as) << 16;
	val |= (uint32_t)arc4_getbyte(as) << 8;
	val |= arc4_getbyte(as);
	return val;
}
162
163 static inline void
164 _arc4random_stir_unlocked(void)
165 {
166 if (__predict_false(!arc4_check_init(&rs))) /* init() stirs */
167 arc4_stir(&rs);
168 }
169
/*
 * Public entry point: re-key the generator, serializing against
 * other threads when the process is threaded.
 */
void
arc4random_stir(void)
{
#ifdef _REENTRANT
	const int locked = __isthreaded;

	if (locked)
		mutex_lock(&rs.mtx);
	_arc4random_stir_unlocked();
	if (locked)
		mutex_unlock(&rs.mtx);
#else
	_arc4random_stir_unlocked();
#endif
}
183
184 static inline void
185 _arc4random_addrandom_unlocked(u_char *dat, int datlen)
186 {
187 arc4_check_init(&rs);
188 arc4_addrandom(&rs, dat, datlen);
189 }
190
191 void
192 arc4random_addrandom(u_char *dat, int datlen)
193 {
194 #ifdef _REENTRANT
195 if (__isthreaded) {
196 mutex_lock(&rs.mtx);
197 _arc4random_addrandom_unlocked(dat, datlen);
198 mutex_unlock(&rs.mtx);
199 return;
200 }
201 #endif
202 _arc4random_addrandom_unlocked(dat, datlen);
203 }
204
205 static inline uint32_t
206 _arc4random_unlocked(void)
207 {
208 arc4_check_init(&rs);
209 return arc4_getword(&rs);
210 }
211
/*
 * Public entry point: return a 32-bit random value, serializing
 * against other threads when the process is threaded.
 */
uint32_t
arc4random(void)
{
	uint32_t result;
#ifdef _REENTRANT
	const int locked = __isthreaded;

	if (locked)
		mutex_lock(&rs.mtx);
	result = _arc4random_unlocked();
	if (locked)
		mutex_unlock(&rs.mtx);
#else
	result = _arc4random_unlocked();
#endif
	return result;
}
227
228 static void
229 _arc4random_buf_unlocked(void *buf, size_t len)
230 {
231 uint8_t *bp = buf;
232 uint8_t *ep = bp + len;
233
234 arc4_check_init(&rs);
235
236 bp[0] = arc4_getbyte(&rs) % 3;
237 while (bp[0]--)
238 (void)arc4_getbyte(&rs);
239
240 while (bp < ep)
241 *bp++ = arc4_getbyte(&rs);
242 }
243
/*
 * Public entry point: fill buf with len random bytes, serializing
 * against other threads when the process is threaded.
 */
void
arc4random_buf(void *buf, size_t len)
{
#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		_arc4random_buf_unlocked(buf, len);
		mutex_unlock(&rs.mtx);
		return;
	}
#endif
	/*
	 * Fallthrough call, matching the lock pattern of every other
	 * wrapper in this file.  The previous version dangled an
	 * "else" immediately before the #endif, which compiled to the
	 * same thing but was fragile under future edits.
	 */
	_arc4random_buf_unlocked(buf, len);
}
257
258 /*-
259 * Written by Damien Miller.
260 * With simplifications by Jinmei Tatuya.
261 */
262
263 /*
264 * Calculate a uniformly distributed random number less than
265 * upper_bound avoiding "modulo bias".
266 *
267 * Uniformity is achieved by generating new random numbers
268 * until the one returned is outside the range
269 * [0, 2^32 % upper_bound[. This guarantees the selected
270 * random number will be inside the range
271 * [2^32 % upper_bound, 2^32[ which maps back to
272 * [0, upper_bound[ after reduction modulo upper_bound.
273 */
/*
 * Return a uniformly distributed random number in [0, upper_bound)
 * by rejection sampling; the lock (if any) must already be held.
 * upper_bound of 0 or 1 yields 0 (the only value for 1; degenerate
 * for 0).
 */
static uint32_t
_arc4random_uniform_unlocked(uint32_t upper_bound)
{
	uint32_t r, min;

	if (upper_bound < 2)
		return 0;

	/*
	 * min = 2^32 mod upper_bound.  Words below min are rejected
	 * so that the accepted range has size an exact multiple of
	 * upper_bound, removing modulo bias.
	 */
#if defined(ULONG_MAX) && (ULONG_MAX > 0xFFFFFFFFUL)
	/*
	 * NOTE(review): <limits.h> is not included directly here;
	 * presumably ULONG_MAX arrives via <sys/param.h> — confirm.
	 */
	min = (uint32_t)(0x100000000U % upper_bound);
#else
	/* calculate (2^32 % upper_bound) avoiding 64-bit math */
	if (upper_bound > 0x80000000U)
		/* 2^32 - upper_bound (only one "value area") */
		min = 1 + ~upper_bound;
	else
		/* ((2^32 - x) % x) == (2^32 % x) when x <= 2^31 */
		min = (0xFFFFFFFFU - upper_bound + 1) % upper_bound;
#endif

	/*
	 * This could theoretically loop forever but each retry has
	 * p > 0.5 (worst case, usually far better) of selecting a
	 * number inside the range we need, so it should rarely need
	 * to re-roll (at all).
	 */
	arc4_check_init(&rs);
	/*
	 * Randomly discard one keystream byte, decoupling this call's
	 * keystream position from what the previous caller saw.
	 */
	if (arc4_getbyte(&rs) & 1)
		(void)arc4_getbyte(&rs);
	do
		r = arc4_getword(&rs);
	while (r < min);

	return r % upper_bound;
}
309
/*
 * Public entry point: return a uniform random number below
 * upper_bound, serializing against other threads when the process is
 * threaded.
 */
uint32_t
arc4random_uniform(uint32_t upper_bound)
{
	uint32_t result;
#ifdef _REENTRANT
	const int locked = __isthreaded;

	if (locked)
		mutex_lock(&rs.mtx);
	result = _arc4random_uniform_unlocked(upper_bound);
	if (locked)
		mutex_unlock(&rs.mtx);
#else
	result = _arc4random_uniform_unlocked(upper_bound);
#endif
	return result;
}
325