/*	$NetBSD: arc4random.c,v 1.13 2012/03/05 19:40:08 christos Exp $	*/
/*	$OpenBSD: arc4random.c,v 1.6 2001/06/05 05:05:38 pvalchev Exp $	*/

/*
 * Arc4 random number generator for OpenBSD.
 * Copyright 1996 David Mazieres <dm@lcs.mit.edu>.
 *
 * Modification and redistribution in source and binary forms is
 * permitted provided that due credit is given to the author and the
 * OpenBSD project by leaving this copyright notice intact.
 */

/*
 * This code is derived from section 17.1 of Applied Cryptography,
 * second edition, which describes a stream cipher allegedly
 * compatible with RSA Labs "RC4" cipher (the actual description of
 * which is a trade secret).  The same algorithm is used as a stream
 * cipher called "arcfour" in Tatu Ylonen's ssh package.
 *
 * Here the stream cipher has been modified always to include the time
 * when initializing the state.  That makes it impossible to
 * regenerate the same random sequence twice, so this can't be used
 * for encryption, but will generate good random numbers.
 *
 * RC4 is a registered trademark of RSA Laboratories.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: arc4random.c,v 1.13 2012/03/05 19:40:08 christos Exp $");
#endif /* LIBC_SCCS and not lint */

#include "namespace.h"
#include "reentrant.h"
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#ifdef __weak_alias
__weak_alias(arc4random,_arc4random)
#endif

#define RSIZE 256
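/*
 * RC4 state for this process: the 256-byte permutation s[], the
 * stream indices i and j, an "initialized" flag, and a mutex used
 * by threaded callers.
 */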
struct arc4_stream {
	mutex_t mtx;
	int initialized;
	uint8_t i;
	uint8_t j;
	uint8_t s[RSIZE];
};

/* XXX lint explodes with an internal error if only mtx is initialized! */
static struct arc4_stream rs = { .i = 0, .mtx = MUTEX_INITIALIZER };

static inline void arc4_init(struct arc4_stream *);
static inline void arc4_addrandom(struct arc4_stream *, u_char *, int);
static void arc4_stir(struct arc4_stream *);
static inline uint8_t arc4_getbyte(struct arc4_stream *);
static inline uint32_t arc4_getword(struct arc4_stream *);

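/*
 * Set up the identity permutation and key it immediately with
 * arc4_stir(), so the state is never used unkeyed.
 */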
static inline void
arc4_init(struct arc4_stream *as)
{
	for (int n = 0; n < RSIZE; n++)
		as->s[n] = n;
	as->i = 0;
	as->j = 0;

	as->initialized = 1;
	arc4_stir(as);
}

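/*
 * Mix key material into the state.  This is the RC4 key-schedule
 * step: one pass over the permutation, folding in dat[] bytes
 * (cycled if datlen is shorter than RSIZE) and swapping elements
 * as it goes.
 */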
static inline void
arc4_addrandom(struct arc4_stream *as, u_char *dat, int datlen)
{
	uint8_t si;

	as->i--;
	for (int n = 0; n < RSIZE; n++) {
		as->i = (as->i + 1);
		si = as->s[as->i];
		as->j = (as->j + si + dat[n % datlen]);
		as->s[as->i] = as->s[as->j];
		as->s[as->j] = si;
	}
	as->j = as->i;
}

static void
arc4_stir(struct arc4_stream *as)
{
	int rdat[32];
	static const int mib[] = { CTL_KERN, KERN_URND };
	size_t len;

	/*
	 * This code once opened and read /dev/urandom on each
	 * call.  That causes repeated rekeying of the kernel stream
	 * generator, which is very wasteful.  Because of application
	 * behavior, caching the fd doesn't really help.  So we just
	 * fill up the tank from sysctl, which is a tiny bit slower
	 * for us but much friendlier to other entropy consumers.
	 */

	for (size_t i = 0; i < __arraycount(rdat); i++) {
		len = sizeof(rdat[i]);
		if (sysctl(mib, 2, &rdat[i], &len, NULL, 0) == -1)
			abort();
	}

	arc4_addrandom(as, (void *) &rdat, (int)sizeof(rdat));

	/*
	 * Throw away the first N words of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 256 in our case.)
	 */
	for (size_t j = 0; j < RSIZE * 4; j++)
		arc4_getbyte(as);
}

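/*
 * One step of RC4 output generation: advance i, add s[i] into j,
 * swap s[i] and s[j], and return s[(s[i] + s[j]) & 0xff].
 */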
static inline uint8_t
arc4_getbyte(struct arc4_stream *as)
{
	uint8_t si, sj;

	as->i = (as->i + 1);
	si = as->s[as->i];
	as->j = (as->j + si);
	sj = as->s[as->j];
	as->s[as->i] = sj;
	as->s[as->j] = si;
	return (as->s[(si + sj) & 0xff]);
}

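/* Concatenate four output bytes into a 32-bit word, high byte first. */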
static inline uint32_t
arc4_getword(struct arc4_stream *as)
{
	uint32_t val;
	val = arc4_getbyte(as) << 24;
	val |= arc4_getbyte(as) << 16;
	val |= arc4_getbyte(as) << 8;
	val |= arc4_getbyte(as);
	return val;
}

static inline void
_arc4random_stir_unlocked(void)
{
	if (__predict_false(!rs.initialized)) {
		arc4_init(&rs);				/* stirs */
	} else {
		arc4_stir(&rs);
	}
}

void
arc4random_stir(void)
{
#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		_arc4random_stir_unlocked();
		mutex_unlock(&rs.mtx);
		return;
	}
#endif
	_arc4random_stir_unlocked();
}

static inline void
_arc4random_addrandom_unlocked(u_char *dat, int datlen)
{
	if (__predict_false(!rs.initialized)) {
		arc4_init(&rs);
	}
	arc4_addrandom(&rs, dat, datlen);
}

void
arc4random_addrandom(u_char *dat, int datlen)
{
#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		_arc4random_addrandom_unlocked(dat, datlen);
		mutex_unlock(&rs.mtx);
		return;
	}
#endif
	_arc4random_addrandom_unlocked(dat, datlen);
}

static inline uint32_t
_arc4random_unlocked(void)
{
	if (__predict_false(!rs.initialized)) {
		arc4_init(&rs);
	}
	return arc4_getword(&rs);
}

uint32_t
arc4random(void)
{
	uint32_t v;
#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		v = _arc4random_unlocked();
		mutex_unlock(&rs.mtx);
		return v;
	}
#endif
	v = _arc4random_unlocked();
	return v;
}

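/*
 * Fill buf with keystream bytes, after first drawing and discarding
 * a small (0-2 byte) random amount of output.
 */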
static void
_arc4random_buf_unlocked(void *buf, size_t len)
{
	uint8_t *bp = buf;
	uint8_t *ep = bp + len;
	uint8_t skip;

	if (__predict_false(!rs.initialized)) {
		arc4_init(&rs);
	}

	/* Use a local for the skip count so a zero-length buffer is never written. */
	skip = arc4_getbyte(&rs) % 3;
	while (skip--)
		(void)arc4_getbyte(&rs);

	while (bp < ep)
		*bp++ = arc4_getbyte(&rs);
}

void
arc4random_buf(void *buf, size_t len)
{
#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		_arc4random_buf_unlocked(buf, len);
		mutex_unlock(&rs.mtx);
		return;
	}
#endif
	_arc4random_buf_unlocked(buf, len);
}

/*-
 * Written by Damien Miller.
 * With simplifications by Jinmei Tatuya.
 */

/*
 * Calculate a uniformly distributed random number less than
 * upper_bound avoiding "modulo bias".
 *
 * Uniformity is achieved by generating new random numbers
 * until the one returned is outside the range
 * [0, 2^32 % upper_bound[. This guarantees the selected
 * random number will be inside the range
 * [2^32 % upper_bound, 2^32[ which maps back to
 * [0, upper_bound[ after reduction modulo upper_bound.
 */
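/*
 * A concrete example: with upper_bound = 6, 2^32 % 6 = 4, so min = 4
 * and the values 0..3 are rejected.  The remaining 2^32 - 4 =
 * 4294967292 accepted values split into exactly 715827882 values for
 * each residue 0..5, so r % 6 is uniform.
 */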
static uint32_t
_arc4random_uniform_unlocked(uint32_t upper_bound)
{
	uint32_t r, min;

	if (upper_bound < 2)
		return 0;

#if defined(ULONG_MAX) && (ULONG_MAX > 0xFFFFFFFFUL)
	min = (uint32_t)(0x100000000UL % upper_bound);
#else
	/* calculate (2^32 % upper_bound) avoiding 64-bit math */
	if (upper_bound > 0x80000000U)
		/* 2^32 - upper_bound (only one "value area") */
		min = 1 + ~upper_bound;
	else
		/* ((2^32 - x) % x) == (2^32 % x) when x <= 2^31 */
		min = (0xFFFFFFFFU - upper_bound + 1) % upper_bound;
#endif

	/*
	 * This could theoretically loop forever but each retry has
	 * p > 0.5 (worst case, usually far better) of selecting a
	 * number inside the range we need, so it should rarely need
	 * to re-roll (at all).
	 */
	if (__predict_false(!rs.initialized)) {
		arc4_init(&rs);
	}
	if (arc4_getbyte(&rs) & 1)
		(void)arc4_getbyte(&rs);
	do
		r = arc4_getword(&rs);
	while (r < min);

	return r % upper_bound;
}

uint32_t
arc4random_uniform(uint32_t upper_bound)
{
	uint32_t v;
#ifdef _REENTRANT
	if (__isthreaded) {
		mutex_lock(&rs.mtx);
		v = _arc4random_uniform_unlocked(upper_bound);
		mutex_unlock(&rs.mtx);
		return v;
	}
#endif
	v = _arc4random_uniform_unlocked(upper_bound);
	return v;
}
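
/*
 * Illustrative usage sketch (not part of this file): how a caller
 * might use the public arc4random(3) interfaces implemented above.
 * Assumes the declarations from <stdlib.h> on NetBSD; guarded out so
 * it is never built as part of libc.
 */
#if 0
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	unsigned char key[16];
	uint32_t word, die;

	word = arc4random();			/* any 32-bit value */
	die = arc4random_uniform(6) + 1;	/* unbiased die roll, 1..6 */
	arc4random_buf(key, sizeof(key));	/* fill a buffer with random bytes */

	printf("word=%" PRIu32 " die=%" PRIu32 " key[0]=%u\n",
	    word, die, (unsigned)key[0]);
	return 0;
}
#endif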