/*	$NetBSD: subr_cprng.c,v 1.39 2020/05/11 21:38:54 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * cprng_strong
 *
 *	Per-CPU NIST Hash_DRBG, reseeded automatically from the entropy
 *	pool when we transition to full entropy, never blocking.  This
 *	is slightly different from the old cprng_strong API, but the
 *	only users of the old one fell into three categories:
 *
 *	1. never-blocking, oughta-be-per-CPU (kern_cprng, sysctl_prng)
 *	2. never-blocking, used per-CPU anyway (/dev/urandom short reads)
 *	3. /dev/random
 *
 *	This code serves the first two categories without having extra
 *	logic for /dev/random.
 *
 *	kern_cprng - available at IPL_VM or lower
 *	user_cprng - available only at IPL_NONE in thread context
 *
 *	The name kern_cprng is for hysterical raisins.  The name
 *	user_cprng serves only to contrast with kern_cprng.
 */
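
/*
 * Example (illustrative only, not part of this file): a typical
 * consumer just draws bytes from one of the well-known instances.
 * Say a hypothetical driver wants a random locally administered MAC
 * address at attach time:
 *
 *	uint8_t macaddr[6];
 *
 *	cprng_strong(kern_cprng, macaddr, sizeof macaddr, 0);
 *	macaddr[0] &= ~0x01;	(clear the multicast bit)
 *	macaddr[0] |= 0x02;	(set the locally administered bit)
 *
 * Code running only in thread context at IPL_NONE can use user_cprng
 * the same way.
 */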

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.39 2020/05/11 21:38:54 riastradh Exp $");

#include <sys/types.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/percpu.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

/*
 * struct cprng_strong
 */
struct cprng_strong {
	struct percpu		*cs_percpu; /* struct cprng_cpu */
	ipl_cookie_t		cs_iplcookie;
};

/*
 * struct cprng_cpu
 *
 *	Per-CPU state for a cprng_strong.  The DRBG and evcnt are
 *	allocated separately because percpu(9) sometimes moves per-CPU
 *	objects around without zeroing them.
 */
struct cprng_cpu {
	struct nist_hash_drbg	*cc_drbg;
	struct {
		struct evcnt	reseed;
		struct evcnt	intr;
	}			*cc_evcnt;
	unsigned		cc_epoch;
};

static int	sysctl_kern_urandom(SYSCTLFN_ARGS);
static int	sysctl_kern_arandom(SYSCTLFN_ARGS);
static void	cprng_init_cpu(void *, void *, struct cpu_info *);
static void	cprng_fini_cpu(void *, void *, struct cpu_info *);

/* Well-known CPRNG instances */
struct cprng_strong *kern_cprng __read_mostly; /* IPL_VM */
struct cprng_strong *user_cprng __read_mostly; /* IPL_NONE */

static struct sysctllog *cprng_sysctllog __read_mostly;
static bool cprng_initialized __read_mostly = false;

void
cprng_init(void)
{

	if (__predict_false(nist_hash_drbg_initialize() != 0))
		panic("NIST Hash_DRBG failed self-test");

	/*
	 * Create CPRNG instances at two IPLs: IPL_VM for kernel use
	 * that may occur inside IPL_VM interrupt handlers (!!??!?!?),
	 * and IPL_NONE for userland use which need not block
	 * interrupts.
	 */
	kern_cprng = cprng_strong_create("kern", IPL_VM, 0);
	user_cprng = cprng_strong_create("user", IPL_NONE, 0);

	/* Create kern.urandom and kern.arandom sysctl nodes.  */
	sysctl_createv(&cprng_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT, "urandom",
	    SYSCTL_DESCR("Independent uniform random 32-bit integer"),
	    sysctl_kern_urandom, 0, NULL, 0, CTL_KERN, KERN_URND, CTL_EOL);
	sysctl_createv(&cprng_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT /*lie*/, "arandom",
	    SYSCTL_DESCR("Independent uniform random bytes, up to 256 bytes"),
	    sysctl_kern_arandom, 0, NULL, 0, CTL_KERN, KERN_ARND, CTL_EOL);

	/* Ready to go.  */
	cprng_initialized = true;
}

/*
 * sysctl kern.urandom
 *
 *	Independent uniform random 32-bit integer.  Read-only.
 */
static int
sysctl_kern_urandom(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int v;
	int error;

	/* Generate an int's worth of data.  */
	cprng_strong(user_cprng, &v, sizeof v, 0);

	/* Do the sysctl dance.  */
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Clear the buffer before returning the sysctl error.  */
	explicit_memset(&v, 0, sizeof v);
	return error;
}

/*
 * sysctl kern.arandom
 *
 *	Independent uniform random bytes, up to 256 bytes.  Read-only.
 */
static int
sysctl_kern_arandom(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	uint8_t buf[256];
	int error;

	/*
	 * Clamp to a reasonably small size.  256 bytes is kind of
	 * arbitrary; 32 would be more reasonable, but we used 256 in
	 * the past, so let's not break compatibility.
	 */
	if (*oldlenp > 256)	/* size_t, so never negative */
		*oldlenp = 256;

	/* Generate data.  */
	cprng_strong(user_cprng, buf, *oldlenp, 0);

	/* Do the sysctl dance.  */
	node.sysctl_data = buf;
	node.sysctl_size = *oldlenp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Clear the buffer before returning the sysctl error.  */
	explicit_memset(buf, 0, sizeof buf);
	return error;
}
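
/*
 * For illustration (not part of this file): userland reads kern.arandom
 * through sysctl(3); BSD libc has historically seeded arc4random(3)
 * this way.  A minimal sketch:
 *
 *	int mib[2] = { CTL_KERN, KERN_ARND };
 *	uint8_t buf[32];
 *	size_t len = sizeof buf;
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */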

struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	struct cprng_strong *cprng;

	cprng = kmem_alloc(sizeof(*cprng), KM_SLEEP);
	cprng->cs_iplcookie = makeiplcookie(ipl);
	cprng->cs_percpu = percpu_create(sizeof(struct cprng_cpu),
	    cprng_init_cpu, cprng_fini_cpu, __UNCONST(name));

	return cprng;
}

void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	percpu_free(cprng->cs_percpu, sizeof(struct cprng_cpu));
	kmem_free(cprng, sizeof(*cprng));
}
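
/*
 * Example (hypothetical, nothing below is in this file): a subsystem
 * that wants its own stream at a different IPL can create a private
 * instance, say at IPL_SOFTNET:
 *
 *	struct cprng_strong *sc_cprng;
 *
 *	sc_cprng = cprng_strong_create("mydrv", IPL_SOFTNET, 0);
 *	...
 *	cprng_strong(sc_cprng, buf, sizeof buf, 0);
 *	...
 *	cprng_strong_destroy(sc_cprng);
 */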

static void
cprng_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct cprng_cpu *cc = ptr;
	const char *name = cookie;
	const char *cpuname;
	uint8_t zero[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	char namebuf[64];	/* XXX size? */

	/*
	 * Format the name as, e.g., kern/8 if we're on cpu8.  This
	 * doesn't get displayed anywhere; it just ensures that if
	 * there were a bug causing us to use the same otherwise secure
	 * seed on multiple CPUs, we would still get independent output
	 * from the NIST Hash_DRBG.
	 */
	snprintf(namebuf, sizeof namebuf, "%s/%u", name, cpu_index(ci));

	/*
	 * Allocate the struct nist_hash_drbg and struct evcnt
	 * separately, since percpu(9) may move objects around in
	 * memory without zeroing.
	 */
	cc->cc_drbg = kmem_zalloc(sizeof(*cc->cc_drbg), KM_SLEEP);
	cc->cc_evcnt = kmem_alloc(sizeof(*cc->cc_evcnt), KM_SLEEP);

	/*
	 * Initialize the DRBG with no seed.  We do this in order to
	 * defer reading from the entropy pool as long as possible.
	 */
	if (__predict_false(nist_hash_drbg_instantiate(cc->cc_drbg,
		    zero, sizeof zero, NULL, 0, namebuf, strlen(namebuf))))
		panic("nist_hash_drbg_instantiate");

	/* Attach the event counters.  */
	/* XXX ci_cpuname may not be initialized early enough.  */
	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
	evcnt_attach_dynamic(&cc->cc_evcnt->intr, EVCNT_TYPE_MISC, NULL,
	    cpuname, "cprng_strong intr");
	evcnt_attach_dynamic(&cc->cc_evcnt->reseed, EVCNT_TYPE_MISC, NULL,
	    cpuname, "cprng_strong reseed");

	/* Set the epoch uninitialized so we reseed on first use.  */
	cc->cc_epoch = 0;
}

static void
cprng_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct cprng_cpu *cc = ptr;

	evcnt_detach(&cc->cc_evcnt->reseed);
	evcnt_detach(&cc->cc_evcnt->intr);
	if (__predict_false(nist_hash_drbg_destroy(cc->cc_drbg)))
		panic("nist_hash_drbg_destroy");

	kmem_free(cc->cc_evcnt, sizeof(*cc->cc_evcnt));
	kmem_free(cc->cc_drbg, sizeof(*cc->cc_drbg));
}

size_t
cprng_strong(struct cprng_strong *cprng, void *buf, size_t len, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES];
	struct cprng_cpu *cc;
	unsigned epoch;
	int s;

	/*
	 * Some device drivers try to use cprng_strong in attach during
	 * autoconf, e.g. to randomly generate MAC addresses, before
	 * percpu(9) is available -- percpu is not available until
	 * after CPUs have been detected during autoconf.  We should
	 * make percpu available sooner, but for now this works around
	 * it.
	 */
	if (__predict_false(!cprng_initialized)) {
		struct nist_hash_drbg drbg;

		entropy_extract(seed, sizeof seed, 0);
		if (__predict_false(nist_hash_drbg_instantiate(&drbg,
			    seed, sizeof seed, NULL, 0, NULL, 0)))
			panic("nist_hash_drbg_instantiate");
		if (__predict_false(nist_hash_drbg_generate(&drbg, buf, len,
			    NULL, 0)))
			panic("nist_hash_drbg_generate");
		explicit_memset(&drbg, 0, sizeof drbg);
		explicit_memset(seed, 0, sizeof seed);
		return len;
	}

	/*
	 * Verify maximum request length.  Callers should really limit
	 * their requests to 32 bytes to avoid spending much time with
	 * preemption disabled -- use the 32 bytes to seed a private
	 * DRBG instance if you need more data (see the sketch after
	 * this function).
	 */
	KASSERT(len <= CPRNG_MAX_LEN);

	/* Verify legacy API use.  */
	KASSERT(flags == 0);

	/* Acquire per-CPU state and block interrupts.  */
	cc = percpu_getref(cprng->cs_percpu);
	s = splraiseipl(cprng->cs_iplcookie);

	if (cpu_intr_p())
		cc->cc_evcnt->intr.ev_count++;

	/* If the entropy epoch has changed, (re)seed.  */
	epoch = entropy_epoch();
	if (__predict_false(epoch != cc->cc_epoch)) {
		entropy_extract(seed, sizeof seed, 0);
		cc->cc_evcnt->reseed.ev_count++;
		if (__predict_false(nist_hash_drbg_reseed(cc->cc_drbg,
			    seed, sizeof seed, NULL, 0)))
			panic("nist_hash_drbg_reseed");
		explicit_memset(seed, 0, sizeof seed);
		cc->cc_epoch = epoch;
	}

	/* Generate data.  Failure here means it's time to reseed.  */
	if (__predict_false(nist_hash_drbg_generate(cc->cc_drbg, buf, len,
		    NULL, 0))) {
		entropy_extract(seed, sizeof seed, 0);
		cc->cc_evcnt->reseed.ev_count++;
		if (__predict_false(nist_hash_drbg_reseed(cc->cc_drbg,
			    seed, sizeof seed, NULL, 0)))
			panic("nist_hash_drbg_reseed");
		explicit_memset(seed, 0, sizeof seed);
		if (__predict_false(nist_hash_drbg_generate(cc->cc_drbg,
			    buf, len, NULL, 0)))
			panic("nist_hash_drbg_generate");
	}

	/* Release state and interrupts.  */
	splx(s);
	percpu_putref(cprng->cs_percpu);

	/* Return the number of bytes generated, for hysterical raisins.  */
	return len;
}
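
/*
 * As suggested above: a caller that needs more than CPRNG_MAX_LEN bytes
 * at once can draw one seed from cprng_strong and expand it with a
 * private DRBG instance.  A minimal sketch -- the names below are
 * hypothetical, not part of this file:
 *
 *	struct nist_hash_drbg mydrbg;
 *	uint8_t myseed[32];
 *
 *	cprng_strong(kern_cprng, myseed, sizeof myseed, 0);
 *	if (nist_hash_drbg_instantiate(&mydrbg, myseed, sizeof myseed,
 *		NULL, 0, NULL, 0))
 *		panic("nist_hash_drbg_instantiate");
 *	explicit_memset(myseed, 0, sizeof myseed);
 *	...call nist_hash_drbg_generate(&mydrbg, buf, len, NULL, 0) at will...
 */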

uint32_t
cprng_strong32(void)
{
	uint32_t r;

	cprng_strong(kern_cprng, &r, sizeof(r), 0);
	return r;
}

uint64_t
cprng_strong64(void)
{
	uint64_t r;

	cprng_strong(kern_cprng, &r, sizeof(r), 0);
	return r;
}