/* subr_cprng.c, revision 1.23.2.1 — NetBSD kernel, sys/kern */
      1 /*	$NetBSD: subr_cprng.c,v 1.23.2.1 2014/07/17 14:03:33 tls Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Thor Lancelot Simon and Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.23.2.1 2014/07/17 14:03:33 tls Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/types.h>
     37 #include <sys/condvar.h>
     38 #include <sys/cprng.h>
     39 #include <sys/errno.h>
     40 #include <sys/event.h>		/* XXX struct knote */
     41 #include <sys/fcntl.h>		/* XXX FNONBLOCK */
     42 #include <sys/kernel.h>
     43 #include <sys/kmem.h>
     44 #include <sys/lwp.h>
     45 #include <sys/once.h>
     46 #include <sys/percpu.h>
     47 #include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
     48 #include <sys/select.h>
     49 #include <sys/systm.h>
     50 #include <sys/sysctl.h>
     51 #include <sys/rnd.h>
     52 #include <sys/rndsink.h>
     53 #if DEBUG
     54 #include <sys/rngtest.h>
     55 #endif
     56 
     57 #include <crypto/nist_ctr_drbg/nist_ctr_drbg.h>
     58 #include <crypto/ccrand/ccrand.h>
     59 
     60 #if defined(__HAVE_CPU_COUNTER)
     61 #include <machine/cpu_counter.h>
     62 #endif
     63 
/*
 * Rekey policy for the per-CPU "fast" generators: schedule a rekey
 * after CPRNGF_MAXBYTES bytes of output or CPRNGF_RESEED_SECONDS
 * seconds, whichever comes first.  CPRNGF_HARDMAX is a hard upper
 * bound (not referenced elsewhere in this file).
 */
#define CPRNGF_MAXBYTES           (512 * 1024 * 1024)
#define CPRNGF_HARDMAX            (1 * 1024 * 1024 * 1024)
#define CPRNGF_RESEED_SECONDS     600

/*
 * Per-CPU state for cprng_fast: the ccrand stream state, the number
 * of bytes generated since the last rekey, and the uptime after which
 * the next reseed is due.
 */
typedef struct {
	ccrand_t	ccrand;		/* underlying stream generator */
	int		numbytes;	/* output since last rekey */
	time_t		nextreseed;	/* time_uptime deadline for rekey */
} cprng_fast_ctx_t;
     73 
/* sysctl handlers for kern.urandom and kern.arandom.  */
static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

/* Internal helpers for the strong (NIST CTR_DRBG) generators.  */
static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void	cprng_strong_reseed(struct cprng_strong *);
static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
		    size_t, bool);
/* Rekey one per-CPU fast-generator context from kern_cprng.  */
static void	cprng_fast_randrekey(cprng_fast_ctx_t *);
/* Soft interrupt for deferred fast rekeys; set in cprng_fast_init().  */
void		*cprng_fast_rekey_softintr = NULL;

#if DEBUG
static void	cprng_strong_rngtest(struct cprng_strong *);
static void	cprng_fast_rngtest(void);
#endif

static rndsink_callback_t	cprng_strong_rndsink_callback;

/* Per-CPU cprng_fast contexts; allocated in cprng_fast_init().  */
percpu_t *percpu_cprng_fast_ctx;
static int cprng_fast_initialized;
     93 
/*
 * cprng_init: one-time initialization of the cprng subsystem.
 * Initializes the NIST CTR_DRBG library and creates the kern.urandom
 * and kern.arandom sysctl nodes.  This runs too early in boot to
 * create the strong PRNG behind kern.urandom; that happens lazily in
 * makeprng().
 */
void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	nist_ctr_initialize();

	/* kern.urandom: one random int per read.  */
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "urandom",
		       SYSCTL_DESCR("Random integer value"),
		       sysctl_kern_urnd, 0, NULL, 0,
		       CTL_KERN, KERN_URND, CTL_EOL);
	/* kern.arandom: caller-sized buffer of random bytes.  */
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "arandom",
		       SYSCTL_DESCR("n bytes of random data"),
		       sysctl_kern_arnd, 0, NULL, 0,
		       CTL_KERN, KERN_ARND, CTL_EOL);
}
    114 
/*
 * cprng_counter: cheap 32-bit timestamp used as additional input for
 * DRBG instantiation, generation, and reseeding.  Prefers the CPU
 * cycle counter when available; before the clock is running (cold),
 * falls back to a simple monotonic counter.
 */
static inline uint32_t
cprng_counter(void)
{
	struct timeval tv;

#if defined(__HAVE_CPU_COUNTER)
	if (cpu_hascounter())
		return cpu_counter32();
#endif
	if (__predict_false(cold)) {
		static int ctr;
		/* microtime unsafe if clock not running yet */
		return ctr++;
	}
	getmicrotime(&tv);
	/* Microseconds since the epoch, truncated to 32 bits.  */
	return (tv.tv_sec * 1000000 + tv.tv_usec);
}
    132 
/*
 * State for one strong (NIST CTR_DRBG) generator.  Mutable fields are
 * protected by cs_lock (see the KASSERTs in the generate/reseed
 * paths); cs_name and cs_flags are set at creation and never change.
 */
struct cprng_strong {
	char		cs_name[16];	/* name for diagnostics/panics */
	int		cs_flags;	/* CPRNG_* flags from creation */
	kmutex_t	cs_lock;	/* serializes generator state */
	kcondvar_t	cs_cv;		/* waiters for full entropy */
	struct selinfo	cs_selq;	/* select/poll/kqueue waiters */
	struct rndsink	*cs_rndsink;	/* entropy source for (re)seeding */
	bool		cs_ready;	/* seeded with full entropy? */
	NIST_CTR_DRBG	cs_drbg;	/* underlying DRBG state */

	/* XXX Kludge for /dev/random `information-theoretic' properties.   */
	unsigned int	cs_remaining;	/* entropy budget in bytes before a
					 * forced reseed (CPRNG_HARD only) */
};
    146 
/*
 * cprng_strong_create: create a strong generator named name, usable
 * by code running up to IPL ipl, with CPRNG_* flags.  Instantiates
 * the CTR_DRBG from whatever entropy the rndsink can supply right
 * now; cs_ready records whether that was full entropy.  Never returns
 * NULL (allocation sleeps; DRBG instantiation failure panics).
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	/* cc and the name personalize this DRBG instance.  */
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed));

	/* CPRNG_HARD generators debit entropy: one key's worth per seed.  */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);

	return cprng;
}
    191 
/*
 * cprng_strong_destroy: destroy cprng.  Caller must guarantee there
 * are no concurrent or future users (the KASSERT checks there are no
 * condvar waiters).
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
    214 
/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.  Returns the number of bytes generated, which may
 * be less than requested (capped at CPRNG_MAX_LEN, or at the entropy
 * budget for CPRNG_HARD generators, or 0 if we gave up waiting).
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Reseed with whatever entropy is available right now.  */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Wait for full entropy.  Give up with 0 bytes if the
		 * caller is nonblocking, if the generator was created
		 * without condvar support, or if the sleep is
		 * interrupted by a signal (cv_wait_sig != 0).
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/*
			 * Budget exhausted: cap this request at what
			 * remains, restart the budget, and force a
			 * reseed before the next request is served.
			 */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
    271 
static void	filt_cprng_detach(struct knote *);
static int	filt_cprng_event(struct knote *, long);

/* kqueue read-filter ops for strong generators.  */
static const struct filterops cprng_filtops =
	{ 1, NULL, filt_cprng_detach, filt_cprng_event };

/*
 * cprng_strong_kqfilter: attach a kqueue filter to cprng.  Only
 * EVFILT_READ is supported; the knote fires once the generator has
 * full entropy (see filt_cprng_event).
 */
int
cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
{

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &cprng_filtops;
		kn->kn_hook = cprng;
		/* Hook onto the select queue under the generator lock.  */
		mutex_enter(&cprng->cs_lock);
		SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
		mutex_exit(&cprng->cs_lock);
		return 0;

	case EVFILT_WRITE:
	default:
		return EINVAL;
	}
}
    296 
    297 static void
    298 filt_cprng_detach(struct knote *kn)
    299 {
    300 	struct cprng_strong *const cprng = kn->kn_hook;
    301 
    302 	mutex_enter(&cprng->cs_lock);
    303 	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
    304 	mutex_exit(&cprng->cs_lock);
    305 }
    306 
/*
 * filt_cprng_event: knote event routine.  Report the generator
 * readable -- claiming CPRNG_MAX_LEN bytes available -- iff it
 * currently has full entropy.  When called from selnotify (hint ==
 * NOTE_SUBMIT) the generator lock is already held; otherwise take it
 * here.
 */
static int
filt_cprng_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cprng = kn->kn_hook;
	int ret;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready) {
		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */
		ret = 1;
	} else {
		ret = 0;
	}
	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_exit(&cprng->cs_lock);

	return ret;
}
    330 
    331 int
    332 cprng_strong_poll(struct cprng_strong *cprng, int events)
    333 {
    334 	int revents;
    335 
    336 	if (!ISSET(events, (POLLIN | POLLRDNORM)))
    337 		return 0;
    338 
    339 	mutex_enter(&cprng->cs_lock);
    340 	if (cprng->cs_ready) {
    341 		revents = (events & (POLLIN | POLLRDNORM));
    342 	} else {
    343 		selrecord(curlwp, &cprng->cs_selq);
    344 		revents = 0;
    345 	}
    346 	mutex_exit(&cprng->cs_lock);
    347 
    348 	return revents;
    349 }
    350 
    351 /*
    352  * XXX Move nist_ctr_drbg_reseed_advised_p and
    353  * nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make
    354  * the NIST_CTR_DRBG structure opaque.
    355  */
    356 static bool
    357 nist_ctr_drbg_reseed_advised_p(NIST_CTR_DRBG *drbg)
    358 {
    359 
    360 	return (drbg->reseed_counter > (NIST_CTR_DRBG_RESEED_INTERVAL / 2));
    361 }
    362 
    363 static bool
    364 nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg)
    365 {
    366 
    367 	return (drbg->reseed_counter >= NIST_CTR_DRBG_RESEED_INTERVAL);
    368 }
    369 
/*
 * Generate some data from the underlying generator.  Called with
 * cs_lock held; bytes must not exceed CPRNG_MAX_LEN.  Caller must
 * have ensured cs_ready (or accepted partial entropy).
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST CTR_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the CTR_DRBG should not fail.
	 */
	if (__predict_false(nist_ctr_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST CTR_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}
    407 
/*
 * Reseed with whatever we can get from the system entropy pool right
 * now.  rndsink_request fills seed with the best available bytes and
 * reports whether they constitute full entropy.  Called with cs_lock
 * held.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	explicit_memset(seed, 0, sizeof(seed));	/* don't leave key material */
}
    423 
/*
 * Reseed with the given seed (which must be exactly
 * NIST_BLOCK_KEYLEN_BYTES long).  If we now have full entropy, notify
 * waiters.  Called with cs_lock held.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			/* Wake sleepers, poll(2), and kqueue waiters.  */
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name);

#if DEBUG
	cprng_strong_rngtest(cprng);
#endif
}
    469 
#if DEBUG
/*
 * Generate some output and apply a statistical RNG test to it.
 * Called with cs_lock held, hence the non-sleeping allocation.  On
 * test failure, mark the generator not ready and schedule a reseed.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead?  */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn?  */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	/* Fill the test buffer straight from the DRBG.  */
	if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b),
		NULL, 0))
		panic("cprng %s: NIST CTR_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good...  */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
    504 
/*
 * Apply a statistical RNG test to this CPU's fast generator.  Called
 * from the rekey path after reseeding.  Failure is only logged; the
 * generator keeps running.
 */
static void cprng_fast_rngtest(void)
{
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn? */
		return;

	/* Label the test with the CPU whose context we are sampling.  */
	(void)snprintf(rt->rt_name, sizeof(rt->rt_name),
		       "cpu%d", curcpu()->ci_index);
	cprng_fast(rt->rt_b, sizeof(rt->rt_b));

	if (rngtest(rt)) {
		printf("cprng_fast for %s: failed statistical RNG test\n",
		       rt->rt_name);
	}
	explicit_memset(rt, 0, sizeof(*rt));	/* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif
    524 
/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.  context is the struct cprng_strong * passed to
 * rndsink_create().
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
    539 
/* Strong PRNG backing the kern.urandom sysctl node.  */
static cprng_strong_t *sysctl_prng;

/*
 * makeprng: create sysctl_prng on first use (via RUN_ONCE in
 * sysctl_kern_urnd).  CPRNG_INIT_ANY|CPRNG_REKEY_ANY means it accepts
 * partial entropy and never blocks.  Always returns 0 (ONCE protocol).
 */
static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
					  CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}
    551 
/*
 * sysctl helper routine for kern.urandom node. Picks a random number
 * for you.  Returns EIO on a short read from the PRNG.
 */
static int
sysctl_kern_urnd(SYSCTLFN_ARGS)
{
	static ONCE_DECL(control);
	int v, rv;

	/* Lazily create sysctl_prng on first use.  */
	RUN_ONCE(&control, makeprng);
	rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
	if (rv == sizeof(v)) {
		struct sysctlnode node = *rnode;
		node.sysctl_data = &v;
		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
	}
	else
		return (EIO);	/*XXX*/
}
    572 
    573 /*
    574  * sysctl helper routine for kern.arandom node.  Fills the supplied
    575  * structure with random data for you.
    576  *
    577  * This node was originally declared as type "int" but its implementation
    578  * in OpenBSD, whence it came, would happily return up to 8K of data if
    579  * requested.  Evidently this was used to key RC4 in userspace.
    580  *
    581  * In NetBSD, the libc stack-smash-protection code reads 64 bytes
    582  * from here at every program startup.  So though it would be nice
    583  * to make this node return only 32 or 64 bits, we can't.  Too bad!
    584  */
    585 static int
    586 sysctl_kern_arnd(SYSCTLFN_ARGS)
    587 {
    588 	int error;
    589 	void *v;
    590 	struct sysctlnode node = *rnode;
    591 
    592 	switch (*oldlenp) {
    593 	    case 0:
    594 		return 0;
    595 	    default:
    596 		if (*oldlenp > 256) {
    597 			return E2BIG;
    598 		}
    599 		v = kmem_alloc(*oldlenp, KM_SLEEP);
    600 		cprng_fast(v, *oldlenp);
    601 		node.sysctl_data = v;
    602 		node.sysctl_size = *oldlenp;
    603 		error = sysctl_lookup(SYSCTLFN_CALL(&node));
    604 		kmem_free(v, *oldlenp);
    605 		return error;
    606 	}
    607 }
    608 
/*
 * cprng_fast_randrekey: rekey one per-CPU fast-generator context with
 * a 128-bit key drawn from the strong kern_cprng, then reset the byte
 * count and reseed deadline for the next cycle.  The ccrand state is
 * swapped at splhigh(), matching the spl used by the output paths.
 */
static void
cprng_fast_randrekey(cprng_fast_ctx_t *ctx)
{
	uint8_t key[16];
	int s;

	/*
	 * Snapshot before drawing the key: if there was no initial
	 * entropy yet, make the next reseed due immediately instead of
	 * a full CPRNGF_RESEED_SECONDS away.
	 */
	int have_initial = rnd_initial_entropy;

	/* NOTE(review): FASYNC here, not FNONBLOCK -- confirm intent.  */
	cprng_strong(kern_cprng, key, sizeof(key), FASYNC);
	s = splhigh();
	ccrand_reseed(&ctx->ccrand, (uint32_t *)key,
		      sizeof(key) / sizeof(uint32_t));
	splx(s);
	explicit_memset(key, 0, sizeof(key));	/* don't leave key material */
	/*
	 * Reset for next reseed cycle.
	 */
	ctx->nextreseed = time_uptime +
	    (have_initial ? CPRNGF_RESEED_SECONDS : 0);
	ctx->numbytes = 0;

#if DEBUG
	cprng_fast_rngtest();
#endif
}
    634 
/*
 * cprng_fast_init_ctx: percpu_foreach callback -- give each CPU's
 * freshly allocated context its initial key.
 */
static void
cprng_fast_init_ctx(void *v,
	      void *arg __unused,
	      struct cpu_info * ci __unused)
{
	cprng_fast_ctx_t *ctx = v;
	cprng_fast_randrekey(ctx);
}
    643 
/*
 * cprng_fast_rekey_one: soft-interrupt handler -- rekey the context
 * of the CPU the handler runs on.
 */
static void
cprng_fast_rekey_one(void *arg __unused)
{
	cprng_fast_ctx_t *ctx = percpu_getref(percpu_cprng_fast_ctx);

	cprng_fast_randrekey(ctx);
	percpu_putref(percpu_cprng_fast_ctx);
}
    652 
    653 /*
    654  * Because we key the cprng_fast instances from the kernel_cprng,
    655  * and we try not to initialize the kernel_cprng until there is at
    656  * least some chance there's entropy available for it, this must
    657  * be called somewhat later than cprng_init() and is thus a separate
    658  * function.
    659  */
    660 void
    661 cprng_fast_init(void)
    662 {
    663         percpu_cprng_fast_ctx = percpu_alloc(sizeof(cprng_fast_ctx_t));
    664 
    665         percpu_foreach(percpu_cprng_fast_ctx, cprng_fast_init_ctx, NULL);
    666 	cprng_fast_rekey_softintr =
    667 	    softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
    668 			      cprng_fast_rekey_one, NULL);
    669 	cprng_fast_initialized++;
    670 }
    671 
    672 static inline void
    673 cprng_fast_checkrekey(cprng_fast_ctx_t *ctx)
    674 {
    675 	extern void *cprng_fast_rekey_softintr;
    676 
    677 	if (__predict_false((ctx->numbytes > CPRNGF_MAXBYTES) ||
    678 			    (time_uptime > ctx->nextreseed))) {
    679 		/* Schedule a deferred reseed */
    680 		softint_schedule(cprng_fast_rekey_softintr);
    681 	}
    682 }
    683 
    684 uint32_t
    685 cprng_fast32(void)
    686 {
    687 	cprng_fast_ctx_t *ctx = percpu_getref(percpu_cprng_fast_ctx);
    688 	int s;
    689 	uint32_t ret;
    690 
    691 	cprng_fast_checkrekey(ctx);
    692 
    693 	s = splhigh();
    694 	ret = ccrand32(&ctx->ccrand);
    695 	splx(s);
    696 	ctx->numbytes+= sizeof(ret);
    697 	percpu_putref(percpu_cprng_fast_ctx);
    698 	return ret;
    699 }
    700 
    701 uint64_t
    702 cprng_fast64(void)
    703 {
    704 	cprng_fast_ctx_t *ctx = percpu_getref(percpu_cprng_fast_ctx);
    705 	int s;
    706 	uint64_t ret;
    707 
    708 	cprng_fast_checkrekey(ctx);
    709 
    710 	s = splhigh();
    711 	ret = ccrand64(&ctx->ccrand);
    712 	splx(s);
    713 	ctx->numbytes += sizeof(ret);
    714 	percpu_putref(percpu_cprng_fast_ctx);
    715 	return ret;
    716 }
    717 
/*
 * cprng_fast: fill p with len random bytes from this CPU's fast
 * generator, first scheduling a deferred rekey if the context is due
 * for one.  Always returns len.
 */
size_t
cprng_fast(void *p, size_t len)
{
	cprng_fast_ctx_t *ctx = percpu_getref(percpu_cprng_fast_ctx);
	int s;

	cprng_fast_checkrekey(ctx);

	/* Generate at splhigh(), matching the other ccrand users.  */
	s = splhigh();
	ccrand_bytes(&ctx->ccrand, p, len);
	splx(s);
	ctx->numbytes += len;
	percpu_putref(percpu_cprng_fast_ctx);
	return len;
}
    733