Home | History | Annotate | Line # | Download | only in kern
subr_cprng.c revision 1.7.2.6
      1  1.7.2.6  yamt /*	$NetBSD: subr_cprng.c,v 1.7.2.6 2014/05/22 11:41:03 yamt Exp $ */
      2  1.7.2.2  yamt 
      3  1.7.2.2  yamt /*-
      4  1.7.2.6  yamt  * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
      5  1.7.2.2  yamt  * All rights reserved.
      6  1.7.2.2  yamt  *
      7  1.7.2.2  yamt  * This code is derived from software contributed to The NetBSD Foundation
      8  1.7.2.6  yamt  * by Thor Lancelot Simon and Taylor R. Campbell.
      9  1.7.2.2  yamt  *
     10  1.7.2.2  yamt  * Redistribution and use in source and binary forms, with or without
     11  1.7.2.2  yamt  * modification, are permitted provided that the following conditions
     12  1.7.2.2  yamt  * are met:
     13  1.7.2.2  yamt  * 1. Redistributions of source code must retain the above copyright
     14  1.7.2.2  yamt  *    notice, this list of conditions and the following disclaimer.
     15  1.7.2.2  yamt  * 2. Redistributions in binary form must reproduce the above copyright
     16  1.7.2.2  yamt  *    notice, this list of conditions and the following disclaimer in the
     17  1.7.2.2  yamt  *    documentation and/or other materials provided with the distribution.
     18  1.7.2.2  yamt  *
     19  1.7.2.2  yamt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  1.7.2.2  yamt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  1.7.2.2  yamt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  1.7.2.2  yamt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  1.7.2.2  yamt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  1.7.2.2  yamt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  1.7.2.2  yamt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  1.7.2.2  yamt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  1.7.2.2  yamt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  1.7.2.2  yamt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  1.7.2.2  yamt  * POSSIBILITY OF SUCH DAMAGE.
     30  1.7.2.2  yamt  */
     31  1.7.2.2  yamt 
     32  1.7.2.6  yamt #include <sys/cdefs.h>
     33  1.7.2.6  yamt __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.7.2.6 2014/05/22 11:41:03 yamt Exp $");
     34  1.7.2.6  yamt 
     35  1.7.2.2  yamt #include <sys/param.h>
     36  1.7.2.6  yamt #include <sys/types.h>
     37  1.7.2.6  yamt #include <sys/condvar.h>
     38  1.7.2.6  yamt #include <sys/cprng.h>
     39  1.7.2.6  yamt #include <sys/errno.h>
     40  1.7.2.6  yamt #include <sys/event.h>		/* XXX struct knote */
     41  1.7.2.6  yamt #include <sys/fcntl.h>		/* XXX FNONBLOCK */
     42  1.7.2.2  yamt #include <sys/kernel.h>
     43  1.7.2.2  yamt #include <sys/kmem.h>
     44  1.7.2.6  yamt #include <sys/lwp.h>
     45  1.7.2.6  yamt #include <sys/once.h>
     46  1.7.2.6  yamt #include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
     47  1.7.2.6  yamt #include <sys/select.h>
     48  1.7.2.6  yamt #include <sys/systm.h>
     49  1.7.2.6  yamt #include <sys/sysctl.h>
     50  1.7.2.2  yamt #include <sys/rnd.h>
     51  1.7.2.6  yamt #include <sys/rndsink.h>
     52  1.7.2.6  yamt #if DEBUG
     53  1.7.2.6  yamt #include <sys/rngtest.h>
     54  1.7.2.6  yamt #endif
     55  1.7.2.6  yamt 
     56  1.7.2.6  yamt #include <crypto/nist_ctr_drbg/nist_ctr_drbg.h>
     57  1.7.2.2  yamt 
     58  1.7.2.2  yamt #if defined(__HAVE_CPU_COUNTER)
     59  1.7.2.2  yamt #include <machine/cpu_counter.h>
     60  1.7.2.2  yamt #endif
     61  1.7.2.2  yamt 
     62  1.7.2.6  yamt static int sysctl_kern_urnd(SYSCTLFN_PROTO);
     63  1.7.2.6  yamt static int sysctl_kern_arnd(SYSCTLFN_PROTO);
     64  1.7.2.6  yamt 
     65  1.7.2.6  yamt static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
     66  1.7.2.6  yamt static void	cprng_strong_reseed(struct cprng_strong *);
     67  1.7.2.6  yamt static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
     68  1.7.2.6  yamt 		    size_t, bool);
     69  1.7.2.6  yamt #if DEBUG
     70  1.7.2.6  yamt static void	cprng_strong_rngtest(struct cprng_strong *);
     71  1.7.2.6  yamt #endif
     72  1.7.2.2  yamt 
     73  1.7.2.6  yamt static rndsink_callback_t	cprng_strong_rndsink_callback;
     74  1.7.2.2  yamt 
/*
 * cprng_init: one-time boot initialization of the cprng subsystem.
 *
 * Sets up the NIST CTR_DRBG internal tables and registers the
 * kern.urandom and kern.arandom sysctl nodes.
 */
void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	nist_ctr_initialize();

	/* kern.urandom: one random int per query (sysctl_kern_urnd).  */
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "urandom",
		       SYSCTL_DESCR("Random integer value"),
		       sysctl_kern_urnd, 0, NULL, 0,
		       CTL_KERN, KERN_URND, CTL_EOL);
	/* kern.arandom: caller-sized random data (sysctl_kern_arnd).  */
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "arandom",
		       SYSCTL_DESCR("n bytes of random data"),
		       sysctl_kern_arnd, 0, NULL, 0,
		       CTL_KERN, KERN_ARND, CTL_EOL);
}
     95  1.7.2.2  yamt 
     96  1.7.2.2  yamt static inline uint32_t
     97  1.7.2.2  yamt cprng_counter(void)
     98  1.7.2.2  yamt {
     99  1.7.2.2  yamt 	struct timeval tv;
    100  1.7.2.2  yamt 
    101  1.7.2.2  yamt #if defined(__HAVE_CPU_COUNTER)
    102  1.7.2.2  yamt 	if (cpu_hascounter())
    103  1.7.2.2  yamt 		return cpu_counter32();
    104  1.7.2.2  yamt #endif
    105  1.7.2.2  yamt 	if (__predict_false(cold)) {
    106  1.7.2.2  yamt 		/* microtime unsafe if clock not running yet */
    107  1.7.2.2  yamt 		return 0;
    108  1.7.2.2  yamt 	}
    109  1.7.2.2  yamt 	microtime(&tv);
    110  1.7.2.2  yamt 	return (tv.tv_sec * 1000000 + tv.tv_usec);
    111  1.7.2.2  yamt }
    112  1.7.2.2  yamt 
/*
 * State of one strong CPRNG instance.  All mutable fields are
 * protected by cs_lock.
 */
struct cprng_strong {
	char		cs_name[16];	/* name for diagnostics/panics */
	int		cs_flags;	/* CPRNG_* flags, fixed at creation */
	kmutex_t	cs_lock;	/* serializes access to this struct */
	kcondvar_t	cs_cv;		/* sleepers awaiting full entropy */
	struct selinfo	cs_selq;	/* select/poll/kqueue waiters */
	struct rndsink	*cs_rndsink;	/* entropy source for (re)seeding */
	bool		cs_ready;	/* true iff seeded with full entropy */
	NIST_CTR_DRBG	cs_drbg;	/* underlying NIST CTR_DRBG state */

	/* XXX Kludge for /dev/random `information-theoretic' properties.   */
	unsigned int	cs_remaining;	/* CPRNG_HARD: bytes left before
					 * the generator is marked unready */
};
    126  1.7.2.6  yamt 
/*
 * cprng_strong_create: allocate and seed a new strong CPRNG.
 *
 * name is copied (truncated to fit cs_name) for diagnostics; ipl is
 * the interrupt level of the instance lock; flags is a mask of
 * CPRNG_* flags.  Always returns a generator (the allocation sleeps);
 * if full entropy was unavailable the generator starts out not-ready.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	/* Don't leave seed material on the stack.  */
	explicit_memset(seed, 0, sizeof(seed));

	/* CPRNG_HARD generators ration one key's worth of output per seed.  */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);

	return cprng;
}
    171  1.7.2.6  yamt 
/*
 * cprng_strong_destroy: tear down and free a CPRNG instance.  Caller
 * must guarantee there are no other users or waiters left.
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	/* Wipe key material before returning the memory.  */
	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
    194  1.7.2.3  yamt 
/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 *
 * Returns the number of bytes written to buffer: at most
 * MIN(bytes, CPRNG_MAX_LEN); possibly fewer for CPRNG_HARD
 * generators; zero if we could not (or would not) wait for entropy.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Reseed with whatever is on hand, even partial entropy.  */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Wait for full entropy.  Give up with zero bytes if
		 * the caller is non-blocking, the generator was made
		 * without condvar support, or the sleep is interrupted
		 * by a signal (cv_wait_sig returns nonzero).
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/* Budget exhausted: truncate and force a reseed.  */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
    251  1.7.2.2  yamt 
/*
 * kqueue support: EVFILT_READ on a strong cprng becomes active once
 * the generator has full entropy.
 */
static void	filt_cprng_detach(struct knote *);
static int	filt_cprng_event(struct knote *, long);

static const struct filterops cprng_filtops =
	{ 1, NULL, filt_cprng_detach, filt_cprng_event };
    257  1.7.2.6  yamt 
    258  1.7.2.6  yamt int
    259  1.7.2.6  yamt cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
    260  1.7.2.2  yamt {
    261  1.7.2.3  yamt 
    262  1.7.2.6  yamt 	switch (kn->kn_filter) {
    263  1.7.2.6  yamt 	case EVFILT_READ:
    264  1.7.2.6  yamt 		kn->kn_fop = &cprng_filtops;
    265  1.7.2.6  yamt 		kn->kn_hook = cprng;
    266  1.7.2.6  yamt 		mutex_enter(&cprng->cs_lock);
    267  1.7.2.6  yamt 		SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
    268  1.7.2.6  yamt 		mutex_exit(&cprng->cs_lock);
    269  1.7.2.6  yamt 		return 0;
    270  1.7.2.2  yamt 
    271  1.7.2.6  yamt 	case EVFILT_WRITE:
    272  1.7.2.6  yamt 	default:
    273  1.7.2.6  yamt 		return EINVAL;
    274  1.7.2.2  yamt 	}
    275  1.7.2.6  yamt }
    276  1.7.2.2  yamt 
/*
 * filt_cprng_detach: knote detach hook; unlink the knote from the
 * cprng's select queue under the instance lock.
 */
static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}
    286  1.7.2.2  yamt 
    287  1.7.2.6  yamt static int
    288  1.7.2.6  yamt filt_cprng_event(struct knote *kn, long hint)
    289  1.7.2.2  yamt {
    290  1.7.2.6  yamt 	struct cprng_strong *const cprng = kn->kn_hook;
    291  1.7.2.6  yamt 	int ret;
    292  1.7.2.2  yamt 
    293  1.7.2.6  yamt 	if (hint == NOTE_SUBMIT)
    294  1.7.2.6  yamt 		KASSERT(mutex_owned(&cprng->cs_lock));
    295  1.7.2.6  yamt 	else
    296  1.7.2.6  yamt 		mutex_enter(&cprng->cs_lock);
    297  1.7.2.6  yamt 	if (cprng->cs_ready) {
    298  1.7.2.6  yamt 		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */
    299  1.7.2.6  yamt 		ret = 1;
    300  1.7.2.6  yamt 	} else {
    301  1.7.2.6  yamt 		ret = 0;
    302  1.7.2.2  yamt 	}
    303  1.7.2.6  yamt 	if (hint == NOTE_SUBMIT)
    304  1.7.2.6  yamt 		KASSERT(mutex_owned(&cprng->cs_lock));
    305  1.7.2.6  yamt 	else
    306  1.7.2.6  yamt 		mutex_exit(&cprng->cs_lock);
    307  1.7.2.2  yamt 
    308  1.7.2.6  yamt 	return ret;
    309  1.7.2.6  yamt }
    310  1.7.2.2  yamt 
    311  1.7.2.6  yamt int
    312  1.7.2.6  yamt cprng_strong_poll(struct cprng_strong *cprng, int events)
    313  1.7.2.6  yamt {
    314  1.7.2.6  yamt 	int revents;
    315  1.7.2.2  yamt 
    316  1.7.2.6  yamt 	if (!ISSET(events, (POLLIN | POLLRDNORM)))
    317  1.7.2.6  yamt 		return 0;
    318  1.7.2.2  yamt 
    319  1.7.2.6  yamt 	mutex_enter(&cprng->cs_lock);
    320  1.7.2.6  yamt 	if (cprng->cs_ready) {
    321  1.7.2.6  yamt 		revents = (events & (POLLIN | POLLRDNORM));
    322  1.7.2.6  yamt 	} else {
    323  1.7.2.6  yamt 		selrecord(curlwp, &cprng->cs_selq);
    324  1.7.2.6  yamt 		revents = 0;
    325  1.7.2.2  yamt 	}
    326  1.7.2.6  yamt 	mutex_exit(&cprng->cs_lock);
    327  1.7.2.2  yamt 
    328  1.7.2.6  yamt 	return revents;
    329  1.7.2.2  yamt }
    330  1.7.2.2  yamt 
/*
 * XXX Move nist_ctr_drbg_reseed_advised_p and
 * nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make
 * the NIST_CTR_DRBG structure opaque.
 */

/* True once more than half the reseed interval has been consumed.  */
static bool
nist_ctr_drbg_reseed_advised_p(NIST_CTR_DRBG *drbg)
{

	return (drbg->reseed_counter > (NIST_CTR_DRBG_RESEED_INTERVAL / 2));
}
    342  1.7.2.2  yamt 
/* True once the DRBG must not generate again without a reseed.  */
static bool
nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg)
{

	return (drbg->reseed_counter >= NIST_CTR_DRBG_RESEED_INTERVAL);
}
    349  1.7.2.6  yamt 
/*
 * Generate some data from the underlying generator.
 *
 * Caller holds cs_lock and has already ensured the generator is
 * seeded; bytes must not exceed CPRNG_MAX_LEN.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	/* Time-varying additional input for this generate call.  */
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST CTR_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the CTR_DRBG should not fail.
	 */
	if (__predict_false(nist_ctr_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST CTR_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}
    387  1.7.2.6  yamt 
/*
 * Reseed with whatever we can get from the system entropy pool right now.
 *
 * Caller holds cs_lock.  rndsink_request reports whether it could
 * supply full entropy; that verdict is passed through to
 * cprng_strong_reseed_from.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	/* Wipe the seed copy from the stack.  */
	explicit_memset(seed, 0, sizeof(seed));
}
    403  1.7.2.6  yamt 
/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 *
 * Caller holds cs_lock.  bytes must be exactly NIST_BLOCK_KEYLEN_BYTES;
 * full_entropy says whether the seed carries full entropy.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	/* Time-varying additional input for the reseed.  */
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name);

#if DEBUG
	cprng_strong_rngtest(cprng);
#endif
}
    449  1.7.2.2  yamt 
#if DEBUG
/*
 * Generate some output and apply a statistical RNG test to it.
 *
 * DEBUG-only sanity check run after every reseed; on failure, logs
 * the event, marks the generator unready, and schedules a reseed.
 * Caller holds cs_lock.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead?  */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn?  */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b),
		NULL, 0))
		panic("cprng %s: NIST CTR_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good...  */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	/* The test buffer held generator output; wipe it before freeing.  */
	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif
    485  1.7.2.2  yamt 
/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.
 *
 * context is the struct cprng_strong passed to rndsink_create.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
    500  1.7.2.2  yamt 
/* Lazily-created generator backing the kern.urandom/kern.arandom sysctls.  */
static cprng_strong_t *sysctl_prng;

/* RUN_ONCE constructor for sysctl_prng; always returns 0 (success).  */
static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
					  CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}
    512  1.7.2.2  yamt 
    513  1.7.2.6  yamt /*
    514  1.7.2.6  yamt  * sysctl helper routine for kern.urandom node. Picks a random number
    515  1.7.2.6  yamt  * for you.
    516  1.7.2.6  yamt  */
    517  1.7.2.6  yamt static int
    518  1.7.2.6  yamt sysctl_kern_urnd(SYSCTLFN_ARGS)
    519  1.7.2.2  yamt {
    520  1.7.2.6  yamt 	static ONCE_DECL(control);
    521  1.7.2.6  yamt 	int v, rv;
    522  1.7.2.6  yamt 
    523  1.7.2.6  yamt 	RUN_ONCE(&control, makeprng);
    524  1.7.2.6  yamt 	rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
    525  1.7.2.6  yamt 	if (rv == sizeof(v)) {
    526  1.7.2.6  yamt 		struct sysctlnode node = *rnode;
    527  1.7.2.6  yamt 		node.sysctl_data = &v;
    528  1.7.2.6  yamt 		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    529  1.7.2.2  yamt 	}
    530  1.7.2.6  yamt 	else
    531  1.7.2.6  yamt 		return (EIO);	/*XXX*/
    532  1.7.2.6  yamt }
    533  1.7.2.6  yamt 
/*
 * sysctl helper routine for kern.arandom node. Picks a random number
 * for you.
 *
 * Honors the caller-supplied *oldlenp as a byte count (1..256) and
 * fills that many bytes from cprng_fast.  Returns 0 immediately for
 * zero-length requests, E2BIG for oversized ones.
 */
static int
sysctl_kern_arnd(SYSCTLFN_ARGS)
{
	int error;
	void *v;
	struct sysctlnode node = *rnode;

	if (*oldlenp == 0)
		return 0;
	/*
	 * This code used to allow sucking 8192 bytes at a time out
	 * of the kernel arc4random generator.  Evidently there is some
	 * very old OpenBSD application code that may try to do this.
	 *
	 * Note that this node is documented as type "INT" -- 4 or 8
	 * bytes, not 8192.
	 *
	 * We continue to support this abuse of the "len" pointer here
	 * but only 256 bytes at a time, as, anecdotally, the actual
	 * application use here was to generate RC4 keys in userspace.
	 *
	 * Support for such large requests will probably be removed
	 * entirely in the future.
	 */
	if (*oldlenp > 256)
		return E2BIG;

	v = kmem_alloc(*oldlenp, KM_SLEEP);
	cprng_fast(v, *oldlenp);
	node.sysctl_data = v;
	node.sysctl_size = *oldlenp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	kmem_free(v, *oldlenp);
	return error;
}
    573