/*	$NetBSD: arc4random.c,v 1.11 2012/02/27 04:25:12 tls Exp $	*/
/*	$OpenBSD: arc4random.c,v 1.6 2001/06/05 05:05:38 pvalchev Exp $	*/

/*
 * Arc4 random number generator for OpenBSD.
 * Copyright 1996 David Mazieres <dm (at) lcs.mit.edu>.
 *
 * Modification and redistribution in source and binary forms is
 * permitted provided that due credit is given to the author and the
 * OpenBSD project by leaving this copyright notice intact.
 */

/*
 * This code is derived from section 17.1 of Applied Cryptography,
 * second edition, which describes a stream cipher allegedly
 * compatible with RSA Labs "RC4" cipher (the actual description of
 * which is a trade secret). The same algorithm is used as a stream
 * cipher called "arcfour" in Tatu Ylonen's ssh package.
 *
 * Here the stream cipher has been modified always to include the time
 * when initializing the state. That makes it impossible to
 * regenerate the same random sequence twice, so this can't be used
 * for encryption, but will generate good random numbers.
 *
 * RC4 is a registered trademark of RSA Laboratories.
 */
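
/*
 * Usage sketch (illustrative caller code, not part of this library;
 * it assumes the usual declarations of these functions in <stdlib.h>).
 * arc4random() returns an arbitrary 32-bit value, arc4random_uniform(n)
 * a bias-free value in [0, n), and arc4random_buf() fills a
 * caller-supplied buffer:
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *
 *	void
 *	example(void)
 *	{
 *		unsigned char key[16];
 *		uint32_t word = arc4random();
 *		uint32_t die = arc4random_uniform(6) + 1;
 *
 *		arc4random_buf(key, sizeof(key));
 *	}
 */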
27
28 #include <sys/cdefs.h>
29 #if defined(LIBC_SCCS) && !defined(lint)
30 __RCSID("$NetBSD: arc4random.c,v 1.11 2012/02/27 04:25:12 tls Exp $");
31 #endif /* LIBC_SCCS and not lint */
32
33 #include "namespace.h"
34 #include "reentrant.h"
35 #include <fcntl.h>
36 #include <stdlib.h>
37 #include <unistd.h>
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/time.h>
41 #include <sys/sysctl.h>
42
43 #ifdef __weak_alias
44 __weak_alias(arc4random,_arc4random)
45 #endif

struct arc4_stream {
        mutex_t mtx;
        uint8_t i;
        uint8_t j;
        uint8_t s[256];
};

static int rs_initialized;
/* XXX lint explodes with an internal error if only mtx is initialized! */
static struct arc4_stream rs = { .i = 0, .mtx = MUTEX_INITIALIZER };

static inline void arc4_init(struct arc4_stream *);
static inline void arc4_addrandom(struct arc4_stream *, u_char *, int);
static void arc4_stir(struct arc4_stream *);
static inline uint8_t arc4_getbyte(struct arc4_stream *);
static inline uint32_t arc4_getword(struct arc4_stream *);

static inline void
arc4_init(struct arc4_stream *as)
{
        int n;

        for (n = 0; n < 256; n++)
                as->s[n] = n;
        as->i = 0;
        as->j = 0;
}

static inline void
arc4_addrandom(struct arc4_stream *as, u_char *dat, int datlen)
{
        int n;
        uint8_t si;

        as->i--;
        for (n = 0; n < 256; n++) {
                as->i = (as->i + 1);
                si = as->s[as->i];
                as->j = (as->j + si + dat[n % datlen]);
                as->s[as->i] = as->s[as->j];
                as->s[as->j] = si;
        }
        as->j = as->i;
}

static void
arc4_stir(struct arc4_stream *as)
{
        int rdat[128 / sizeof(int)];
        int n;
        int mib[2];
        unsigned int i;
        size_t len;

        /*
         * This code once opened and read /dev/urandom on each
         * call. That causes repeated rekeying of the kernel stream
         * generator, which is very wasteful. Because of application
         * behavior, caching the fd doesn't really help. So we just
         * fill up the tank from sysctl, which is a tiny bit slower
         * for us but much friendlier to other entropy consumers.
         */

        mib[0] = CTL_KERN;
        mib[1] = KERN_URND;

        for (i = 0; i < sizeof(rdat) / sizeof(int); i++) {
                len = sizeof(rdat[i]);
                if (sysctl(mib, 2, &rdat[i], &len, NULL, 0) == -1)
                        abort();
        }

        arc4_addrandom(as, (void *) &rdat, sizeof(rdat));

        /*
         * Throw away the first N words of output, as suggested in the
         * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
         * by Fluhrer, Mantin, and Shamir. (N = 256 in our case.)
         */
        for (n = 0; n < 256 * 4; n++)
                arc4_getbyte(as);
}
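
/*
 * The kernel interface used above can be driven the same way from
 * ordinary application code. A minimal stand-alone sketch (hypothetical
 * helper, mirroring the loop in arc4_stir(), which pulls one int of
 * entropy per sysctl(3) call):
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	fill_with_kernel_entropy(int *words, size_t nwords)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_URND };
 *		size_t i, len;
 *
 *		for (i = 0; i < nwords; i++) {
 *			len = sizeof(words[i]);
 *			if (sysctl(mib, 2, &words[i], &len, NULL, 0) == -1)
 *				return -1;
 *		}
 *		return 0;
 *	}
 */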

static inline uint8_t
arc4_getbyte(struct arc4_stream *as)
{
        uint8_t si, sj;

        as->i = (as->i + 1);
        si = as->s[as->i];
        as->j = (as->j + si);
        sj = as->s[as->j];
        as->s[as->i] = sj;
        as->s[as->j] = si;
        return (as->s[(si + sj) & 0xff]);
}

static inline uint32_t
arc4_getword(struct arc4_stream *as)
{
        uint32_t val;
        val = (uint32_t)arc4_getbyte(as) << 24;
        val |= arc4_getbyte(as) << 16;
        val |= arc4_getbyte(as) << 8;
        val |= arc4_getbyte(as);
        return val;
}

static inline void
_arc4random_stir_unlocked(void)
{
        if (!rs_initialized) {
                arc4_init(&rs);
                rs_initialized = 1;
        }
        arc4_stir(&rs);
}

void
arc4random_stir(void)
{
#ifdef _REENTRANT
        if (__isthreaded) {
                mutex_lock(&rs.mtx);
                _arc4random_stir_unlocked();
                mutex_unlock(&rs.mtx);
                return;
        }
#endif
        _arc4random_stir_unlocked();
}

static inline void
_arc4random_addrandom_unlocked(u_char *dat, int datlen)
{
        if (!rs_initialized)
                _arc4random_stir_unlocked();    /* initialize and stir on first use */
        arc4_addrandom(&rs, dat, datlen);
}

void
arc4random_addrandom(u_char *dat, int datlen)
{
#ifdef _REENTRANT
        if (__isthreaded) {
                mutex_lock(&rs.mtx);
                _arc4random_addrandom_unlocked(dat, datlen);
                mutex_unlock(&rs.mtx);
                return;
        }
#endif
        _arc4random_addrandom_unlocked(dat, datlen);
}

static inline uint32_t
_arc4random_unlocked(void)
{
        if (!rs_initialized)
                _arc4random_stir_unlocked();
        return arc4_getword(&rs);
}

uint32_t
arc4random(void)
{
        uint32_t v;
#ifdef _REENTRANT
        if (__isthreaded) {
                mutex_lock(&rs.mtx);
                v = _arc4random_unlocked();
                mutex_unlock(&rs.mtx);
                return v;
        }
#endif
        v = _arc4random_unlocked();
        return v;
}

static void
_arc4random_buf_unlocked(void *buf, size_t len)
{
        uint8_t *bp = buf;
        uint8_t *ep = bp + len;
        uint8_t skip;

        if (!rs_initialized)
                _arc4random_stir_unlocked();

        /* Discard a random 0-2 bytes of keystream before filling the buffer. */
        skip = arc4_getbyte(&rs) % 3;
        while (skip--)
                (void)arc4_getbyte(&rs);

        while (bp < ep)
                *bp++ = arc4_getbyte(&rs);
}

void
arc4random_buf(void *buf, size_t len)
{
#ifdef _REENTRANT
        if (__isthreaded) {
                mutex_lock(&rs.mtx);
                _arc4random_buf_unlocked(buf, len);
                mutex_unlock(&rs.mtx);
                return;
        }
#endif
        _arc4random_buf_unlocked(buf, len);
}

/*-
 * Written by Damien Miller.
 * With simplifications by Jinmei Tatuya.
 */

/*
 * Calculate a uniformly distributed random number less than
 * upper_bound avoiding "modulo bias".
 *
 * Uniformity is achieved by generating new random numbers
 * until the one returned is outside the range
 * [0, 2^32 % upper_bound). This guarantees the selected
 * random number will be inside the range
 * [2^32 % upper_bound, 2^32) which maps back to
 * [0, upper_bound) after reduction modulo upper_bound.
 */
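
/*
 * Worked example: for upper_bound = 6, min = 2^32 % 6 = 4, so the four
 * words 0..3 are rejected and re-rolled. The accepted range [4, 2^32)
 * holds 4294967292 = 6 * 715827882 values, so each residue 0..5 is hit
 * by exactly 715827882 accepted words and the result is uniform.
 */
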
static uint32_t
_arc4random_uniform_unlocked(uint32_t upper_bound)
{
        uint32_t r, min;

        if (upper_bound < 2)
                return 0;

#if defined(ULONG_MAX) && (ULONG_MAX > 0xFFFFFFFFUL)
        min = 0x100000000UL % upper_bound;
#else
        /* calculate (2^32 % upper_bound) avoiding 64-bit math */
        if (upper_bound > 0x80000000U)
                /* 2^32 - upper_bound (only one "value area") */
                min = 1 + ~upper_bound;
        else
                /* ((2^32 - x) % x) == (2^32 % x) when x <= 2^31 */
                min = (0xFFFFFFFFU - upper_bound + 1) % upper_bound;
#endif

        /*
         * This could theoretically loop forever but each retry has
         * p > 0.5 (worst case, usually far better) of selecting a
         * number inside the range we need, so it should rarely need
         * to re-roll (at all).
         */
        if (!rs_initialized)
                _arc4random_stir_unlocked();
        /* Consume one keystream byte, and a second one about half the time. */
        if (arc4_getbyte(&rs) & 1)
                (void)arc4_getbyte(&rs);
        do
                r = arc4_getword(&rs);
        while (r < min);

        return r % upper_bound;
}
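
/*
 * Concretely, the worst case for the retry loop above is
 * upper_bound = 2^31 + 1, where min = 2^32 - (2^31 + 1) = 2^31 - 1 and a
 * fresh word is rejected with probability (2^31 - 1) / 2^32 < 0.5, so the
 * expected number of arc4_getword() calls per result stays below 2; for
 * small upper bounds it is essentially 1.
 */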

uint32_t
arc4random_uniform(uint32_t upper_bound)
{
        uint32_t v;
#ifdef _REENTRANT
        if (__isthreaded) {
                mutex_lock(&rs.mtx);
                v = _arc4random_uniform_unlocked(upper_bound);
                mutex_unlock(&rs.mtx);
                return v;
        }
#endif
        v = _arc4random_uniform_unlocked(upper_bound);
        return v;
}