/* $NetBSD: subr_cprng.c,v 1.27.10.1 2019/09/03 12:08:22 martin Exp $ */
2
3 /*-
4 * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Thor Lancelot Simon and Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.27.10.1 2019/09/03 12:08:22 martin Exp $");
34
35 #include <sys/param.h>
36 #include <sys/types.h>
37 #include <sys/condvar.h>
38 #include <sys/cprng.h>
39 #include <sys/errno.h>
40 #include <sys/event.h> /* XXX struct knote */
41 #include <sys/fcntl.h> /* XXX FNONBLOCK */
42 #include <sys/kernel.h>
43 #include <sys/kmem.h>
44 #include <sys/lwp.h>
45 #include <sys/once.h>
46 #include <sys/percpu.h>
47 #include <sys/poll.h> /* XXX POLLIN/POLLOUT/&c. */
48 #include <sys/select.h>
49 #include <sys/systm.h>
50 #include <sys/sysctl.h>
51 #include <sys/rndsink.h>
52 #if DIAGNOSTIC
53 #include <sys/rngtest.h>
54 #endif
55
56 #include <crypto/nist_hash_drbg/nist_hash_drbg.h>
57
58 #if defined(__HAVE_CPU_COUNTER)
59 #include <machine/cpu_counter.h>
60 #endif
61
/* sysctl handlers for kern.urandom and kern.arandom (registered below). */
static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

/* Internal cprng_strong operations; all require cs_lock held by caller. */
static void cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void cprng_strong_reseed(struct cprng_strong *);
static void cprng_strong_reseed_from(struct cprng_strong *, const void *,
    size_t, bool);
#if DIAGNOSTIC
static void cprng_strong_rngtest(struct cprng_strong *);
#endif

/* Callback invoked by the rndsink layer when requested entropy arrives. */
static rndsink_callback_t cprng_strong_rndsink_callback;
74
/*
 * cprng_init: one-time initialization of the cprng subsystem.
 * Self-tests the NIST Hash_DRBG (panicking on failure, since no
 * strong randomness could be produced afterward) and registers the
 * kern.urandom and kern.arandom sysctl nodes.
 */
void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	if (nist_hash_drbg_initialize() != 0)
		panic("NIST Hash_DRBG failed self-test");

	/* kern.urandom: one random int per lookup. */
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "urandom",
	    SYSCTL_DESCR("Random integer value"),
	    sysctl_kern_urnd, 0, NULL, 0,
	    CTL_KERN, KERN_URND, CTL_EOL);
	/* kern.arandom: caller-sized buffer of random bytes. */
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "arandom",
	    SYSCTL_DESCR("n bytes of random data"),
	    sysctl_kern_arnd, 0, NULL, 0,
	    CTL_KERN, KERN_ARND, CTL_EOL);
}
96
97 static inline uint32_t
98 cprng_counter(void)
99 {
100 struct timeval tv;
101
102 #if defined(__HAVE_CPU_COUNTER)
103 if (cpu_hascounter())
104 return cpu_counter32();
105 #endif
106 if (__predict_false(cold)) {
107 static int ctr;
108 /* microtime unsafe if clock not running yet */
109 return ctr++;
110 }
111 getmicrotime(&tv);
112 return (tv.tv_sec * 1000000 + tv.tv_usec);
113 }
114
struct cprng_strong {
	char		cs_name[16];	/* name for diagnostics/condvar */
	int		cs_flags;	/* CPRNG_* creation flags */
	kmutex_t	cs_lock;	/* protects all fields below */
	percpu_t	*cs_percpu;	/* NOTE(review): unused in this file */
	kcondvar_t	cs_cv;		/* waiters for full entropy */
	struct selinfo	cs_selq;	/* select/poll/kqueue waiters */
	struct rndsink	*cs_rndsink;	/* entropy source hookup */
	bool		cs_ready;	/* seeded with full entropy? */
	NIST_HASH_DRBG	cs_drbg;	/* the underlying generator state */

	/* XXX Kludge for /dev/random `information-theoretic' properties. */
	unsigned int	cs_remaining;	/* bytes left before forced reseed
					 * (CPRNG_HARD only; 0 otherwise) */
};
129
/*
 * cprng_strong_create: allocate and initialize a strong cprng named
 * `name', with a lock at `ipl', honouring CPRNG_* `flags'.  Requests
 * initial seed material from the rndsink; records in cs_ready whether
 * that seed had full entropy.  Panics if DRBG instantiation fails.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_HASH_DRBG_MIN_SEEDLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_hash_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG instantiation failed",
		    cprng->cs_name);
	/* Scrub the seed from the stack.  */
	explicit_memset(seed, 0, sizeof(seed));

	/*
	 * CPRNG_HARD bounds output to one seed's worth of bytes between
	 * reseeds; otherwise no per-seed budget is kept.
	 */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	/* Warn unless the caller said partial initial entropy is OK.  */
	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}
176
/*
 * cprng_strong_destroy: tear down a cprng created by
 * cprng_strong_create.  Caller must guarantee no other users remain
 * (no waiters on the condvar, no pending rndsink callbacks after the
 * rndsink is destroyed).
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_hash_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	/* Wipe DRBG state before freeing.  */
	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
199
/*
 * cprng_strong: generate up to CPRNG_MAX_LEN bytes from cprng into
 * buffer; returns the number of bytes actually generated.  Block or
 * return zero bytes, depending on flags & FNONBLOCK, if cprng was
 * created without CPRNG_REKEY_ANY.  Callers wanting more than
 * CPRNG_MAX_LEN bytes must loop.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Rekey-any: reseed with whatever entropy is available.  */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Otherwise wait for full entropy, unless nonblocking,
		 * the cprng was created without CPRNG_USE_CV, or the
		 * wait is interrupted by a signal -- in those cases
		 * return 0 bytes.
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/*
			 * Budget exhausted: truncate this request to
			 * what remains, reset the budget, and demand a
			 * reseed before any further output.
			 */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
258
/* kqueue filter hooks for EVFILT_READ on a cprng (see kqfilter below). */
static void filt_cprng_detach(struct knote *);
static int filt_cprng_event(struct knote *, long);

static const struct filterops cprng_filtops =
	{ 1, NULL, filt_cprng_detach, filt_cprng_event };
264
265 int
266 cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
267 {
268
269 switch (kn->kn_filter) {
270 case EVFILT_READ:
271 kn->kn_fop = &cprng_filtops;
272 kn->kn_hook = cprng;
273 mutex_enter(&cprng->cs_lock);
274 SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
275 mutex_exit(&cprng->cs_lock);
276 return 0;
277
278 case EVFILT_WRITE:
279 default:
280 return EINVAL;
281 }
282 }
283
/*
 * filt_cprng_detach: remove a previously attached knote from the
 * cprng's select queue, under cs_lock.
 */
static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}
293
294 static int
295 filt_cprng_event(struct knote *kn, long hint)
296 {
297 struct cprng_strong *const cprng = kn->kn_hook;
298 int ret;
299
300 if (hint == NOTE_SUBMIT)
301 KASSERT(mutex_owned(&cprng->cs_lock));
302 else
303 mutex_enter(&cprng->cs_lock);
304 if (cprng->cs_ready) {
305 kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large? */
306 ret = 1;
307 } else {
308 ret = 0;
309 }
310 if (hint == NOTE_SUBMIT)
311 KASSERT(mutex_owned(&cprng->cs_lock));
312 else
313 mutex_exit(&cprng->cs_lock);
314
315 return ret;
316 }
317
318 int
319 cprng_strong_poll(struct cprng_strong *cprng, int events)
320 {
321 int revents;
322
323 if (!ISSET(events, (POLLIN | POLLRDNORM)))
324 return 0;
325
326 mutex_enter(&cprng->cs_lock);
327 if (cprng->cs_ready) {
328 revents = (events & (POLLIN | POLLRDNORM));
329 } else {
330 selrecord(curlwp, &cprng->cs_selq);
331 revents = 0;
332 }
333 mutex_exit(&cprng->cs_lock);
334
335 return revents;
336 }
337
338 /*
339 * XXX Move nist_hash_drbg_reseed_advised_p and
340 * nist_hash_drbg_reseed_needed_p into the nist_hash_drbg API and make
341 * the NIST_HASH_DRBG structure opaque.
342 */
343 static bool
344 nist_hash_drbg_reseed_advised_p(NIST_HASH_DRBG *drbg)
345 {
346
347 return (drbg->reseed_counter > (NIST_HASH_DRBG_RESEED_INTERVAL / 2));
348 }
349
350 static bool
351 nist_hash_drbg_reseed_needed_p(NIST_HASH_DRBG *drbg)
352 {
353
354 return (drbg->reseed_counter >= NIST_HASH_DRBG_RESEED_INTERVAL);
355 }
356
/*
 * cprng_strong_generate: produce `bytes' (<= CPRNG_MAX_LEN) of output
 * from the underlying DRBG into buffer.  Caller holds cs_lock and has
 * already ensured the generator is seeded.  Schedules reseeds as the
 * reseed counter advances, and marks the cprng not-ready when the
 * generator is exhausted.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	/* Additional input: a cheap nonce, not counted as entropy.  */
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST Hash_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the Hash_DRBG should not fail.
	 */
	if (__predict_false(nist_hash_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST Hash_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_hash_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_hash_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}
394
395 /*
396 * Reseed with whatever we can get from the system entropy pool right now.
397 */
398 static void
399 cprng_strong_reseed(struct cprng_strong *cprng)
400 {
401 uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];
402
403 KASSERT(mutex_owned(&cprng->cs_lock));
404
405 const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
406 sizeof(seed));
407 cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
408 explicit_memset(seed, 0, sizeof(seed));
409 }
410
/*
 * cprng_strong_reseed_from: reseed the DRBG with the given seed, which
 * must be exactly NIST_HASH_DRBG_MIN_SEEDLEN_BYTES.  If we now have
 * full entropy, notify waiters (condvar and select/kqueue).  Caller
 * holds cs_lock.  Panics if the DRBG reseed fails.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_hash_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc,
		sizeof(cc)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG reseed failed",
		    cprng->cs_name);

#if DIAGNOSTIC
	/* Sanity-check the freshly reseeded generator's output.  */
	cprng_strong_rngtest(cprng);
#endif
}
458
#if DIAGNOSTIC
/*
 * cprng_strong_rngtest: generate some output and apply a statistical
 * RNG test to it.  Called after every reseed under DIAGNOSTIC, with
 * cs_lock held.  On failure, logs a message, marks the cprng
 * not-ready, and schedules a reseed.  Allocation failure (KM_NOSLEEP)
 * silently skips the test.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead? */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn? */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	if (nist_hash_drbg_generate(&cprng->cs_drbg, rt->rt_b,
		sizeof(rt->rt_b), NULL, 0))
		panic("cprng %s: NIST Hash_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good... */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	/* Don't leave generator output lying around.  */
	explicit_memset(rt, 0, sizeof(*rt));	/* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif
494
/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.  Runs without cs_lock, so take it here.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
509
/* Lazily created generator backing the kern.urandom sysctl node. */
static cprng_strong_t *sysctl_prng;

/*
 * makeprng: RUN_ONCE constructor for sysctl_prng.  Always returns 0.
 */
static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
	    CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}
521
522 /*
523 * sysctl helper routine for kern.urandom node. Picks a random number
524 * for you.
525 */
526 static int
527 sysctl_kern_urnd(SYSCTLFN_ARGS)
528 {
529 static ONCE_DECL(control);
530 int v, rv;
531
532 RUN_ONCE(&control, makeprng);
533 rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
534 if (rv == sizeof(v)) {
535 struct sysctlnode node = *rnode;
536 node.sysctl_data = &v;
537 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
538 }
539 else
540 return (EIO); /*XXX*/
541 }
542
543 /*
544 * sysctl helper routine for kern.arandom node. Fills the supplied
545 * structure with random data for you.
546 *
547 * This node was originally declared as type "int" but its implementation
548 * in OpenBSD, whence it came, would happily return up to 8K of data if
549 * requested. Evidently this was used to key RC4 in userspace.
550 *
551 * In NetBSD, the libc stack-smash-protection code reads 64 bytes
552 * from here at every program startup. So though it would be nice
553 * to make this node return only 32 or 64 bits, we can't. Too bad!
554 */
555 static int
556 sysctl_kern_arnd(SYSCTLFN_ARGS)
557 {
558 int error;
559 void *v;
560 struct sysctlnode node = *rnode;
561
562 switch (*oldlenp) {
563 case 0:
564 return 0;
565 default:
566 if (*oldlenp > 256) {
567 return E2BIG;
568 }
569 v = kmem_alloc(*oldlenp, KM_SLEEP);
570 cprng_fast(v, *oldlenp);
571 node.sysctl_data = v;
572 node.sysctl_size = *oldlenp;
573 error = sysctl_lookup(SYSCTLFN_CALL(&node));
574 kmem_free(v, *oldlenp);
575 return error;
576 }
577 }
578