/* NetBSD sys/kern/subr_cprng.c, revision 1.23 (code-viewer header removed) */
      1 /*	$NetBSD: subr_cprng.c,v 1.23 2014/01/17 02:12:48 pooka Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Thor Lancelot Simon and Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.23 2014/01/17 02:12:48 pooka Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/types.h>
     37 #include <sys/condvar.h>
     38 #include <sys/cprng.h>
     39 #include <sys/errno.h>
     40 #include <sys/event.h>		/* XXX struct knote */
     41 #include <sys/fcntl.h>		/* XXX FNONBLOCK */
     42 #include <sys/kernel.h>
     43 #include <sys/kmem.h>
     44 #include <sys/lwp.h>
     45 #include <sys/once.h>
     46 #include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
     47 #include <sys/select.h>
     48 #include <sys/systm.h>
     49 #include <sys/sysctl.h>
     50 #include <sys/rnd.h>
     51 #include <sys/rndsink.h>
     52 #if DEBUG
     53 #include <sys/rngtest.h>
     54 #endif
     55 
     56 #include <crypto/nist_ctr_drbg/nist_ctr_drbg.h>
     57 
     58 #if defined(__HAVE_CPU_COUNTER)
     59 #include <machine/cpu_counter.h>
     60 #endif
     61 
/* Sysctl handlers for the kern.urandom and kern.arandom nodes.  */
static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

/* Internal strong-cprng operations; each asserts cs_lock is held.  */
static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void	cprng_strong_reseed(struct cprng_strong *);
static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
		    size_t, bool);
#if DEBUG
static void	cprng_strong_rngtest(struct cprng_strong *);
#endif

/* Callback invoked by the rndsink when requested entropy arrives.  */
static rndsink_callback_t	cprng_strong_rndsink_callback;
     75 void
     76 cprng_init(void)
     77 {
     78 	static struct sysctllog *random_sysctllog;
     79 
     80 	nist_ctr_initialize();
     81 
     82 	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
     83 		       CTLFLAG_PERMANENT,
     84 		       CTLTYPE_INT, "urandom",
     85 		       SYSCTL_DESCR("Random integer value"),
     86 		       sysctl_kern_urnd, 0, NULL, 0,
     87 		       CTL_KERN, KERN_URND, CTL_EOL);
     88 	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
     89 		       CTLFLAG_PERMANENT,
     90 		       CTLTYPE_INT, "arandom",
     91 		       SYSCTL_DESCR("n bytes of random data"),
     92 		       sysctl_kern_arnd, 0, NULL, 0,
     93 		       CTL_KERN, KERN_ARND, CTL_EOL);
     94 }
     95 
     96 static inline uint32_t
     97 cprng_counter(void)
     98 {
     99 	struct timeval tv;
    100 
    101 #if defined(__HAVE_CPU_COUNTER)
    102 	if (cpu_hascounter())
    103 		return cpu_counter32();
    104 #endif
    105 	if (__predict_false(cold)) {
    106 		/* microtime unsafe if clock not running yet */
    107 		return 0;
    108 	}
    109 	microtime(&tv);
    110 	return (tv.tv_sec * 1000000 + tv.tv_usec);
    111 }
    112 
/*
 * State of one strong cprng: a NIST CTR_DRBG seeded through a rndsink,
 * plus the synchronization needed for sleeping/polling consumers.
 */
struct cprng_strong {
	char		cs_name[16];	/* name for diagnostics and DRBG
					 * personalization */
	int		cs_flags;	/* CPRNG_* flags from creation */
	kmutex_t	cs_lock;	/* serializes all access to this state */
	kcondvar_t	cs_cv;		/* broadcast when cs_ready goes true */
	struct selinfo	cs_selq;	/* select/poll/kevent waiters */
	struct rndsink	*cs_rndsink;	/* source of seed material */
	bool		cs_ready;	/* seeded with full entropy? */
	NIST_CTR_DRBG	cs_drbg;	/* underlying NIST CTR_DRBG state */

	/* XXX Kludge for /dev/random `information-theoretic' properties.   */
	unsigned int	cs_remaining;	/* bytes of output left before a
					 * forced reseed (CPRNG_HARD only) */
};
    126 
/*
 * Create and seed a new strong cprng.  ipl is the highest IPL at which
 * cs_lock will be taken; flags are CPRNG_* (CPRNG_HARD, CPRNG_USE_CV,
 * CPRNG_INIT_ANY, CPRNG_REKEY_ANY).  Never fails (KM_SLEEP), but may
 * start with only partial entropy; see cs_ready.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	/* NB: hands our pointer to the rndsink before we are fully set up. */
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed)); /* don't leak seed material */

	/* CPRNG_HARD cprngs ration one key's worth of output per seed.  */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	/* Warn unless the caller explicitly tolerates a partial seed.  */
	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);

	return cprng;
}
    171 
/*
 * Tear down a cprng created by cprng_strong_create.  Caller must
 * ensure there are no remaining users; the KASSERT below checks for
 * condvar waiters.
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
    194 
/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 *
 * Returns the number of bytes generated, which may be less than
 * requested: at most CPRNG_MAX_LEN per call, possibly truncated for
 * CPRNG_HARD cprngs, and zero if we would have had to block but
 * could not (or the sleep was interrupted).
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Reseed with whatever is available, even partial entropy. */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Wait for full entropy.  Return zero bytes instead if
		 * the caller is non-blocking, if this cprng cannot
		 * sleep (no CPRNG_USE_CV), or if cv_wait_sig reports
		 * an interrupted sleep (nonzero return).
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/*
			 * Budget exhausted: truncate the request, reset
			 * the budget, and force a reseed before the
			 * next caller gets any output.
			 */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
    251 
static void	filt_cprng_detach(struct knote *);
static int	filt_cprng_event(struct knote *, long);

/* Filter ops for kevent(2) EVFILT_READ on a strong cprng.  */
static const struct filterops cprng_filtops =
	{ 1, NULL, filt_cprng_detach, filt_cprng_event };
    257 
    258 int
    259 cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
    260 {
    261 
    262 	switch (kn->kn_filter) {
    263 	case EVFILT_READ:
    264 		kn->kn_fop = &cprng_filtops;
    265 		kn->kn_hook = cprng;
    266 		mutex_enter(&cprng->cs_lock);
    267 		SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
    268 		mutex_exit(&cprng->cs_lock);
    269 		return 0;
    270 
    271 	case EVFILT_WRITE:
    272 	default:
    273 		return EINVAL;
    274 	}
    275 }
    276 
    277 static void
    278 filt_cprng_detach(struct knote *kn)
    279 {
    280 	struct cprng_strong *const cprng = kn->kn_hook;
    281 
    282 	mutex_enter(&cprng->cs_lock);
    283 	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
    284 	mutex_exit(&cprng->cs_lock);
    285 }
    286 
    287 static int
    288 filt_cprng_event(struct knote *kn, long hint)
    289 {
    290 	struct cprng_strong *const cprng = kn->kn_hook;
    291 	int ret;
    292 
    293 	if (hint == NOTE_SUBMIT)
    294 		KASSERT(mutex_owned(&cprng->cs_lock));
    295 	else
    296 		mutex_enter(&cprng->cs_lock);
    297 	if (cprng->cs_ready) {
    298 		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */
    299 		ret = 1;
    300 	} else {
    301 		ret = 0;
    302 	}
    303 	if (hint == NOTE_SUBMIT)
    304 		KASSERT(mutex_owned(&cprng->cs_lock));
    305 	else
    306 		mutex_exit(&cprng->cs_lock);
    307 
    308 	return ret;
    309 }
    310 
    311 int
    312 cprng_strong_poll(struct cprng_strong *cprng, int events)
    313 {
    314 	int revents;
    315 
    316 	if (!ISSET(events, (POLLIN | POLLRDNORM)))
    317 		return 0;
    318 
    319 	mutex_enter(&cprng->cs_lock);
    320 	if (cprng->cs_ready) {
    321 		revents = (events & (POLLIN | POLLRDNORM));
    322 	} else {
    323 		selrecord(curlwp, &cprng->cs_selq);
    324 		revents = 0;
    325 	}
    326 	mutex_exit(&cprng->cs_lock);
    327 
    328 	return revents;
    329 }
    330 
    331 /*
    332  * XXX Move nist_ctr_drbg_reseed_advised_p and
    333  * nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make
    334  * the NIST_CTR_DRBG structure opaque.
    335  */
    336 static bool
    337 nist_ctr_drbg_reseed_advised_p(NIST_CTR_DRBG *drbg)
    338 {
    339 
    340 	return (drbg->reseed_counter > (NIST_CTR_DRBG_RESEED_INTERVAL / 2));
    341 }
    342 
    343 static bool
    344 nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg)
    345 {
    346 
    347 	return (drbg->reseed_counter >= NIST_CTR_DRBG_RESEED_INTERVAL);
    348 }
    349 
/*
 * Generate some data from the underlying generator.
 *
 * Caller holds cs_lock and guarantees bytes <= CPRNG_MAX_LEN; may
 * clear cs_ready and schedule a reseed as a side effect.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST CTR_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the CTR_DRBG should not fail.
	 */
	if (__predict_false(nist_ctr_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST CTR_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}
    387 
    388 /*
    389  * Reseed with whatever we can get from the system entropy pool right now.
    390  */
    391 static void
    392 cprng_strong_reseed(struct cprng_strong *cprng)
    393 {
    394 	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
    395 
    396 	KASSERT(mutex_owned(&cprng->cs_lock));
    397 
    398 	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
    399 	    sizeof(seed));
    400 	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
    401 	explicit_memset(seed, 0, sizeof(seed));
    402 }
    403 
/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 *
 * Caller holds cs_lock; bytes must be exactly NIST_BLOCK_KEYLEN_BYTES.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			/* Wake sleepers and pollers/kevent listeners.  */
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name);

#if DEBUG
	cprng_strong_rngtest(cprng);
#endif
}
    449 
#if DEBUG
/*
 * Generate some output and apply a statistical RNG test to it.
 *
 * Best-effort debug check run after every reseed: silently skipped if
 * the test buffer cannot be allocated.  On failure it clears cs_ready
 * and schedules another reseed.  Caller holds cs_lock.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead?  */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn?  */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b),
		NULL, 0))
		panic("cprng %s: NIST CTR_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good...  */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif
    485 
    486 /*
    487  * Feed entropy from an rndsink request into the CPRNG for which the
    488  * request was issued.
    489  */
    490 static void
    491 cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
    492 {
    493 	struct cprng_strong *const cprng = context;
    494 
    495 	mutex_enter(&cprng->cs_lock);
    496 	/* Assume that rndsinks provide only full-entropy output.  */
    497 	cprng_strong_reseed_from(cprng, seed, bytes, true);
    498 	mutex_exit(&cprng->cs_lock);
    499 }
    500 
/* Strong cprng backing the sysctl nodes; created lazily by makeprng.  */
static cprng_strong_t *sysctl_prng;

/* Once-only constructor for sysctl_prng.  Always returns 0 (success).  */
static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
					  CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}
    512 
    513 /*
    514  * sysctl helper routine for kern.urandom node. Picks a random number
    515  * for you.
    516  */
    517 static int
    518 sysctl_kern_urnd(SYSCTLFN_ARGS)
    519 {
    520 	static ONCE_DECL(control);
    521 	int v, rv;
    522 
    523 	RUN_ONCE(&control, makeprng);
    524 	rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
    525 	if (rv == sizeof(v)) {
    526 		struct sysctlnode node = *rnode;
    527 		node.sysctl_data = &v;
    528 		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    529 	}
    530 	else
    531 		return (EIO);	/*XXX*/
    532 }
    533 
    534 /*
    535  * sysctl helper routine for kern.arandom node. Picks a random number
    536  * for you.
    537  */
    538 static int
    539 sysctl_kern_arnd(SYSCTLFN_ARGS)
    540 {
    541 	int error;
    542 	void *v;
    543 	struct sysctlnode node = *rnode;
    544 
    545 	if (*oldlenp == 0)
    546 		return 0;
    547 	/*
    548 	 * This code used to allow sucking 8192 bytes at a time out
    549 	 * of the kernel arc4random generator.  Evidently there is some
    550 	 * very old OpenBSD application code that may try to do this.
    551 	 *
    552 	 * Note that this node is documented as type "INT" -- 4 or 8
    553 	 * bytes, not 8192.
    554 	 *
    555 	 * We continue to support this abuse of the "len" pointer here
    556 	 * but only 256 bytes at a time, as, anecdotally, the actual
    557 	 * application use here was to generate RC4 keys in userspace.
    558 	 *
    559 	 * Support for such large requests will probably be removed
    560 	 * entirely in the future.
    561 	 */
    562 	if (*oldlenp > 256)
    563 		return E2BIG;
    564 
    565 	v = kmem_alloc(*oldlenp, KM_SLEEP);
    566 	cprng_fast(v, *oldlenp);
    567 	node.sysctl_data = v;
    568 	node.sysctl_size = *oldlenp;
    569 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    570 	kmem_free(v, *oldlenp);
    571 	return error;
    572 }
    573