/* $NetBSD: arc4random.c,v 1.12 2012/03/04 00:36:43 tls Exp $ */
2 /* $OpenBSD: arc4random.c,v 1.6 2001/06/05 05:05:38 pvalchev Exp $ */
3
4 /*
5 * Arc4 random number generator for OpenBSD.
6 * Copyright 1996 David Mazieres <dm (at) lcs.mit.edu>.
7 *
8 * Modification and redistribution in source and binary forms is
9 * permitted provided that due credit is given to the author and the
10 * OpenBSD project by leaving this copyright notice intact.
11 */
12
13 /*
14 * This code is derived from section 17.1 of Applied Cryptography,
15 * second edition, which describes a stream cipher allegedly
16 * compatible with RSA Labs "RC4" cipher (the actual description of
17 * which is a trade secret). The same algorithm is used as a stream
18 * cipher called "arcfour" in Tatu Ylonen's ssh package.
19 *
20 * Here the stream cipher has been modified always to include the time
21 * when initializing the state. That makes it impossible to
22 * regenerate the same random sequence twice, so this can't be used
23 * for encryption, but will generate good random numbers.
24 *
25 * RC4 is a registered trademark of RSA Laboratories.
26 */
27
28 #include <sys/cdefs.h>
29 #if defined(LIBC_SCCS) && !defined(lint)
30 __RCSID("$NetBSD: arc4random.c,v 1.12 2012/03/04 00:36:43 tls Exp $");
31 #endif /* LIBC_SCCS and not lint */
32
33 #include "namespace.h"
34 #include "reentrant.h"
35 #include <fcntl.h>
36 #include <stdlib.h>
37 #include <unistd.h>
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/time.h>
41 #include <sys/sysctl.h>
42
43 #ifdef __weak_alias
44 __weak_alias(arc4random,_arc4random)
45 #endif
46
/*
 * RC4 cipher state, plus the lock and lazy-init flag that guard the
 * single process-wide instance ("rs") below.
 */
struct arc4_stream {
	mutex_t mtx;		/* serializes access when __isthreaded */
	int initialized;	/* nonzero once arc4_init() has run */
	uint8_t i;		/* RC4 index i */
	uint8_t j;		/* RC4 index j */
	uint8_t s[256];		/* RC4 permutation of 0..255 */
};

/* XXX lint explodes with an internal error if only mtx is initialized! */
static struct arc4_stream rs = { .i = 0, .mtx = MUTEX_INITIALIZER };

static inline void arc4_init(struct arc4_stream *);
static inline void arc4_addrandom(struct arc4_stream *, u_char *, int);
static void arc4_stir(struct arc4_stream *);
static inline uint8_t arc4_getbyte(struct arc4_stream *);
static inline uint32_t arc4_getword(struct arc4_stream *);
63
64 static inline void
65 arc4_init(struct arc4_stream *as)
66 {
67 int n;
68
69 for (n = 0; n < 256; n++)
70 as->s[n] = n;
71 as->i = 0;
72 as->j = 0;
73
74 as->initialized = 1;
75 arc4_stir(as);
76 }
77
78 static inline void
79 arc4_addrandom(struct arc4_stream *as, u_char *dat, int datlen)
80 {
81 int n;
82 uint8_t si;
83
84 as->i--;
85 for (n = 0; n < 256; n++) {
86 as->i = (as->i + 1);
87 si = as->s[as->i];
88 as->j = (as->j + si + dat[n % datlen]);
89 as->s[as->i] = as->s[as->j];
90 as->s[as->j] = si;
91 }
92 as->j = as->i;
93 }
94
95 static void
96 arc4_stir(struct arc4_stream *as)
97 {
98 int rdat[128 / sizeof(int)];
99 int n;
100 int mib[2];
101 unsigned int i;
102 size_t len;
103
104 /*
105 * This code once opened and read /dev/urandom on each
106 * call. That causes repeated rekeying of the kernel stream
107 * generator, which is very wasteful. Because of application
108 * behavior, caching the fd doesn't really help. So we just
109 * fill up the tank from sysctl, which is a tiny bit slower
110 * for us but much friendlier to other entropy consumers.
111 */
112
113 mib[0] = CTL_KERN;
114 mib[1] = KERN_URND;
115
116 for (i = 0; i < sizeof(rdat) / sizeof(int); i++) {
117 len = sizeof(rdat[i]);
118 if (sysctl(mib, 2, &rdat[i], &len, NULL, 0) == -1)
119 abort();
120 }
121
122 arc4_addrandom(as, (void *) &rdat, sizeof(rdat));
123
124 /*
125 * Throw away the first N words of output, as suggested in the
126 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
127 * by Fluher, Mantin, and Shamir. (N = 256 in our case.)
128 */
129 for (n = 0; n < 256 * 4; n++)
130 arc4_getbyte(as);
131 }
132
133 static inline uint8_t
134 arc4_getbyte(struct arc4_stream *as)
135 {
136 uint8_t si, sj;
137
138 as->i = (as->i + 1);
139 si = as->s[as->i];
140 as->j = (as->j + si);
141 sj = as->s[as->j];
142 as->s[as->i] = sj;
143 as->s[as->j] = si;
144 return (as->s[(si + sj) & 0xff]);
145 }
146
/*
 * Assemble a big-endian 32-bit word from four stream bytes.
 *
 * The bytes are cast to uint32_t before shifting: a bare uint8_t
 * promotes to (signed) int, and left-shifting a byte >= 0x80 by 24
 * overflows int, which is undefined behavior.
 */
static inline uint32_t
arc4_getword(struct arc4_stream *as)
{
	uint32_t val;

	val = (uint32_t)arc4_getbyte(as) << 24;
	val |= (uint32_t)arc4_getbyte(as) << 16;
	val |= (uint32_t)arc4_getbyte(as) << 8;
	val |= arc4_getbyte(as);
	return val;
}
157
158 static inline void
159 _arc4random_stir_unlocked(void)
160 {
161 if (__predict_false(!rs.initialized)) {
162 arc4_init(&rs); /* stirs */
163 } else {
164 arc4_stir(&rs);
165 }
166 }
167
/*
 * Public stir entry point: take the stream lock when the process is
 * threaded, then defer to the unlocked helper.
 */
void
arc4random_stir(void)
{
#ifdef _REENTRANT
	const int locked = __isthreaded;

	if (locked)
		mutex_lock(&rs.mtx);
	_arc4random_stir_unlocked();
	if (locked)
		mutex_unlock(&rs.mtx);
#else
	_arc4random_stir_unlocked();
#endif
}
181
182 static inline void
183 _arc4random_addrandom_unlocked(u_char *dat, int datlen)
184 {
185 if (__predict_false(rs.initialized)) {
186 arc4_init(&rs);
187 }
188 arc4_addrandom(&rs, dat, datlen);
189 }
190
191 void
192 arc4random_addrandom(u_char *dat, int datlen)
193 {
194 #ifdef _REENTRANT
195 if (__isthreaded) {
196 mutex_lock(&rs.mtx);
197 _arc4random_addrandom_unlocked(dat, datlen);
198 mutex_unlock(&rs.mtx);
199 return;
200 }
201 #endif
202 _arc4random_addrandom_unlocked(dat, datlen);
203 }
204
205 static inline uint32_t
206 _arc4random_unlocked(void)
207 {
208 if (__predict_false(!rs.initialized)) {
209 arc4_init(&rs);
210 }
211 return arc4_getword(&rs);
212 }
213
/*
 * Public interface: return a uniformly distributed 32-bit value,
 * locking the shared stream when the process is threaded.
 */
uint32_t
arc4random(void)
{
	uint32_t ret;

#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		ret = _arc4random_unlocked();
		mutex_unlock(&rs.mtx);
		return ret;
	}
#endif
	return _arc4random_unlocked();
}
229
230 static void
231 _arc4random_buf_unlocked(void *buf, size_t len)
232 {
233 uint8_t *bp = buf;
234 uint8_t *ep = bp + len;
235
236 if (__predict_false(!rs.initialized)) {
237 arc4_init(&rs);
238 }
239
240 bp[0] = arc4_getbyte(&rs) % 3;
241 while (bp[0]--)
242 (void)arc4_getbyte(&rs);
243
244 while (bp < ep)
245 *bp++ = arc4_getbyte(&rs);
246 }
247
/*
 * Public interface: fill buf with len random bytes, taking the
 * stream lock when the process is threaded.
 */
void
arc4random_buf(void *buf, size_t len)
{
#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		_arc4random_buf_unlocked(buf, len);
		mutex_unlock(&rs.mtx);
		return;
	}
#endif
	_arc4random_buf_unlocked(buf, len);
}
261
262 /*-
263 * Written by Damien Miller.
264 * With simplifications by Jinmei Tatuya.
265 */
266
267 /*
268 * Calculate a uniformly distributed random number less than
269 * upper_bound avoiding "modulo bias".
270 *
271 * Uniformity is achieved by generating new random numbers
272 * until the one returned is outside the range
273 * [0, 2^32 % upper_bound[. This guarantees the selected
274 * random number will be inside the range
275 * [2^32 % upper_bound, 2^32[ which maps back to
276 * [0, upper_bound[ after reduction modulo upper_bound.
277 */
/*
 * Return a uniform random number in [0, upper_bound) with the lock
 * already held.  upper_bound < 2 always yields 0.  Rejection
 * sampling against min = 2^32 % upper_bound removes modulo bias.
 */
static uint32_t
_arc4random_uniform_unlocked(uint32_t upper_bound)
{
	uint32_t r, min;

	if (upper_bound < 2)
		return 0;

	/*
	 * NOTE(review): ULONG_MAX is used without an explicit
	 * <limits.h> include here; presumably it arrives via
	 * <sys/param.h> — confirm, since an undefined ULONG_MAX
	 * silently selects the 32-bit fallback branch.
	 */
#if defined(ULONG_MAX) && (ULONG_MAX > 0xFFFFFFFFUL)
	min = 0x100000000UL % upper_bound;
#else
	/* calculate (2^32 % upper_bound) avoiding 64-bit math */
	if (upper_bound > 0x80000000U)
		/* 2^32 - upper_bound (only one "value area") */
		min = 1 + ~upper_bound;
	else
		/* ((2^32 - x) % x) == (2^32 % x) when x <= 2^31 */
		min = (0xFFFFFFFFU - upper_bound + 1) % upper_bound;
#endif

	/*
	 * This could theoretically loop forever but each retry has
	 * p > 0.5 (worst case, usually far better) of selecting a
	 * number inside the range we need, so it should rarely need
	 * to re-roll (at all).
	 */
	if (__predict_false(!rs.initialized)) {
		arc4_init(&rs);
	}
	/*
	 * NOTE(review): this consumes one stream byte (sometimes two)
	 * before sampling — an extra skip not present in the sibling
	 * word path.  Harmless to uniformity, but confirm it is
	 * intentional rather than leftover experimentation.
	 */
	if (arc4_getbyte(&rs) & 1)
		(void)arc4_getbyte(&rs);
	do
		r = arc4_getword(&rs);
	while (r < min);

	/* r is now uniform on [min, 2^32); reduce to [0, upper_bound). */
	return r % upper_bound;
}
315
/*
 * Public interface: uniform random value in [0, upper_bound),
 * locking the shared stream when the process is threaded.
 */
uint32_t
arc4random_uniform(uint32_t upper_bound)
{
	uint32_t ret;

#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		ret = _arc4random_uniform_unlocked(upper_bound);
		mutex_unlock(&rs.mtx);
		return ret;
	}
#endif
	return _arc4random_uniform_unlocked(upper_bound);
}
331