subr_cprng.c revision 1.29.4.2 1 /* $NetBSD: subr_cprng.c,v 1.29.4.2 2020/04/21 18:42:42 martin Exp $ */
2
3 /*-
4 * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Thor Lancelot Simon and Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.29.4.2 2020/04/21 18:42:42 martin Exp $");
34
35 #include <sys/param.h>
36 #include <sys/types.h>
37 #include <sys/condvar.h>
38 #include <sys/cprng.h>
39 #include <sys/errno.h>
40 #include <sys/event.h> /* XXX struct knote */
41 #include <sys/fcntl.h> /* XXX FNONBLOCK */
42 #include <sys/kernel.h>
43 #include <sys/kmem.h>
44 #include <sys/lwp.h>
45 #include <sys/once.h>
46 #include <sys/percpu.h>
47 #include <sys/poll.h> /* XXX POLLIN/POLLOUT/&c. */
48 #include <sys/select.h>
49 #include <sys/systm.h>
50 #include <sys/sysctl.h>
51 #include <sys/rndsink.h>
52
53 #include <crypto/nist_hash_drbg/nist_hash_drbg.h>
54
55 #if defined(__HAVE_CPU_COUNTER)
56 #include <machine/cpu_counter.h>
57 #endif
58
/* sysctl handlers for the kern.urandom and kern.arandom nodes. */
static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

/* Internal operations on a strong cprng; all require cs_lock held. */
static void cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void cprng_strong_reseed(struct cprng_strong *);
static void cprng_strong_reseed_from(struct cprng_strong *, const void *,
    size_t, bool);

/* Callback invoked by the rndsink when fresh entropy is delivered. */
static rndsink_callback_t cprng_strong_rndsink_callback;
68
69 void
70 cprng_init(void)
71 {
72 static struct sysctllog *random_sysctllog;
73
74 if (nist_hash_drbg_initialize() != 0)
75 panic("NIST Hash_DRBG failed self-test");
76
77 sysctl_createv(&random_sysctllog, 0, NULL, NULL,
78 CTLFLAG_PERMANENT,
79 CTLTYPE_INT, "urandom",
80 SYSCTL_DESCR("Random integer value"),
81 sysctl_kern_urnd, 0, NULL, 0,
82 CTL_KERN, KERN_URND, CTL_EOL);
83 sysctl_createv(&random_sysctllog, 0, NULL, NULL,
84 CTLFLAG_PERMANENT,
85 CTLTYPE_INT, "arandom",
86 SYSCTL_DESCR("n bytes of random data"),
87 sysctl_kern_arnd, 0, NULL, 0,
88 CTL_KERN, KERN_ARND, CTL_EOL);
89 }
90
91 static inline uint32_t
92 cprng_counter(void)
93 {
94 struct timeval tv;
95
96 #if defined(__HAVE_CPU_COUNTER)
97 if (cpu_hascounter())
98 return cpu_counter32();
99 #endif
100 if (__predict_false(cold)) {
101 static int ctr;
102 /* microtime unsafe if clock not running yet */
103 return ctr++;
104 }
105 getmicrotime(&tv);
106 return (tv.tv_sec * 1000000 + tv.tv_usec);
107 }
108
/*
 * State of one strong cprng instance.  All mutable fields are
 * protected by cs_lock.
 */
struct cprng_strong {
	char cs_name[16];	/* diagnostic label; also the cv wmesg */
	int cs_flags;		/* CPRNG_HARD/INIT_ANY/REKEY_ANY/USE_CV */
	kmutex_t cs_lock;	/* protects everything below */
	percpu_t *cs_percpu;
	kcondvar_t cs_cv;	/* waiters blocked until full entropy */
	struct selinfo cs_selq;	/* select/poll/kqueue readers */
	struct rndsink *cs_rndsink;	/* source of reseed entropy */
	bool cs_ready;		/* true iff seeded with full entropy */
	NIST_HASH_DRBG cs_drbg;	/* the underlying generator state */

	/* XXX Kludge for /dev/random `information-theoretic' properties.  */
	unsigned int cs_remaining;	/* bytes left before forced reseed
					 * (CPRNG_HARD only) */
};
123
/*
 * cprng_strong_create: allocate and seed a new strong cprng.
 *
 * name is copied (truncated to 16 bytes) for diagnostics and used as
 * the condvar wmesg.  ipl is the IPL of the cprng's lock; flags is a
 * mask of CPRNG_HARD, CPRNG_INIT_ANY, CPRNG_REKEY_ANY, CPRNG_USE_CV.
 * Sleeps for allocation (KM_SLEEP); never returns NULL.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	memset(cprng->cs_name, 0, sizeof(cprng->cs_name));
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_HASH_DRBG_MIN_SEEDLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_hash_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed));	/* scrub seed material */

	/* CPRNG_HARD rations output: one seed's worth per reseed.  */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}
171
/*
 * cprng_strong_destroy: tear down a cprng created by
 * cprng_strong_create.  Caller guarantees no other users remain.
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_hash_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
194
/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 *
 * Returns the number of bytes actually generated, which may be less
 * than requested: at most CPRNG_MAX_LEN per call, and for CPRNG_HARD
 * cprngs at most the entropy remaining before the next reseed.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Rekeying with partial entropy is acceptable; do it now. */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Wait for full entropy.  Bail out with zero bytes if
		 * nonblocking, if blocking is not enabled (no
		 * CPRNG_USE_CV), or if the sleep was interrupted by a
		 * signal (cv_wait_sig returned nonzero).
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/*
			 * Exhausted the budget: truncate the request,
			 * reset the budget for after the next reseed,
			 * and schedule that reseed now.
			 */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
253
254 uint32_t
255 cprng_strong32(void)
256 {
257 uint32_t r;
258 cprng_strong(kern_cprng, &r, sizeof(r), 0);
259 return r;
260 }
261
262 uint64_t
263 cprng_strong64(void)
264 {
265 uint64_t r;
266 cprng_strong(kern_cprng, &r, sizeof(r), 0);
267 return r;
268 }
269
/*
 * filt_cprng_detach: kqueue detach routine; unhook the knote from the
 * cprng's selinfo klist under the cprng lock.
 */
static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}
279
280 static int
281 filt_cprng_read_event(struct knote *kn, long hint)
282 {
283 struct cprng_strong *const cprng = kn->kn_hook;
284 int ret;
285
286 if (hint == NOTE_SUBMIT)
287 KASSERT(mutex_owned(&cprng->cs_lock));
288 else
289 mutex_enter(&cprng->cs_lock);
290 if (cprng->cs_ready) {
291 kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large? */
292 ret = 1;
293 } else {
294 ret = 0;
295 }
296 if (hint == NOTE_SUBMIT)
297 KASSERT(mutex_owned(&cprng->cs_lock));
298 else
299 mutex_exit(&cprng->cs_lock);
300
301 return ret;
302 }
303
304 static int
305 filt_cprng_write_event(struct knote *kn, long hint)
306 {
307 struct cprng_strong *const cprng = kn->kn_hook;
308
309 if (hint == NOTE_SUBMIT)
310 KASSERT(mutex_owned(&cprng->cs_lock));
311 else
312 mutex_enter(&cprng->cs_lock);
313
314 kn->kn_data = 0;
315
316 if (hint == NOTE_SUBMIT)
317 KASSERT(mutex_owned(&cprng->cs_lock));
318 else
319 mutex_exit(&cprng->cs_lock);
320
321 return 0;
322 }
323
/* kqueue filter operations for reads from a cprng device node. */
static const struct filterops cprng_read_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_read_event,
};

/* kqueue filter operations for writes; never reports writable. */
static const struct filterops cprng_write_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_write_event,
};
337
338 int
339 cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
340 {
341
342 switch (kn->kn_filter) {
343 case EVFILT_READ:
344 kn->kn_fop = &cprng_read_filtops;
345 break;
346 case EVFILT_WRITE:
347 kn->kn_fop = &cprng_write_filtops;
348 break;
349 default:
350 return EINVAL;
351 }
352
353 kn->kn_hook = cprng;
354 mutex_enter(&cprng->cs_lock);
355 SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
356 mutex_exit(&cprng->cs_lock);
357 return 0;
358 }
359
360 int
361 cprng_strong_poll(struct cprng_strong *cprng, int events)
362 {
363 int revents;
364
365 if (!ISSET(events, (POLLIN | POLLRDNORM)))
366 return 0;
367
368 mutex_enter(&cprng->cs_lock);
369 if (cprng->cs_ready) {
370 revents = (events & (POLLIN | POLLRDNORM));
371 } else {
372 selrecord(curlwp, &cprng->cs_selq);
373 revents = 0;
374 }
375 mutex_exit(&cprng->cs_lock);
376
377 return revents;
378 }
379
380 /*
381 * XXX Move nist_hash_drbg_reseed_advised_p and
382 * nist_hash_drbg_reseed_needed_p into the nist_hash_drbg API and make
383 * the NIST_HASH_DRBG structure opaque.
384 */
385 static bool
386 nist_hash_drbg_reseed_advised_p(NIST_HASH_DRBG *drbg)
387 {
388
389 return (drbg->reseed_counter > (NIST_HASH_DRBG_RESEED_INTERVAL / 2));
390 }
391
392 static bool
393 nist_hash_drbg_reseed_needed_p(NIST_HASH_DRBG *drbg)
394 {
395
396 return (drbg->reseed_counter >= NIST_HASH_DRBG_RESEED_INTERVAL);
397 }
398
/*
 * Generate some data from the underlying generator.
 *
 * Caller holds cs_lock and guarantees bytes <= CPRNG_MAX_LEN.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST Hash_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the Hash_DRBG should not fail.
	 */
	if (__predict_false(nist_hash_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST Hash_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_hash_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_hash_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}
436
/*
 * Reseed with whatever we can get from the system entropy pool right now.
 *
 * Caller holds cs_lock.  The seed buffer is scrubbed before return.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* rndsink_request reports whether it supplied full entropy. */
	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	explicit_memset(seed, 0, sizeof(seed));
}
452
/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 *
 * Caller holds cs_lock; bytes must equal
 * NIST_HASH_DRBG_MIN_SEEDLEN_BYTES.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			/* Wake sleepers and poll/kqueue waiters. */
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_hash_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc,
		sizeof(cc)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG reseed failed",
		    cprng->cs_name);
}
496
/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.
 *
 * context is the struct cprng_strong passed to rndsink_create.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
511
/* Lazily-created cprng backing the sysctl nodes; see makeprng(). */
static ONCE_DECL(sysctl_prng_once);
static cprng_strong_t *sysctl_prng;
514
/*
 * makeprng: RUN_ONCE initializer that creates the sysctl cprng.
 * Always returns 0.
 */
static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
	    CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}
524
525 /*
526 * sysctl helper routine for kern.urandom node. Picks a random number
527 * for you.
528 */
529 static int
530 sysctl_kern_urnd(SYSCTLFN_ARGS)
531 {
532 int v, rv;
533
534 RUN_ONCE(&sysctl_prng_once, makeprng);
535 rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
536 if (rv == sizeof(v)) {
537 struct sysctlnode node = *rnode;
538 node.sysctl_data = &v;
539 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
540 }
541 else
542 return (EIO); /*XXX*/
543 }
544
545 /*
546 * sysctl helper routine for kern.arandom node. Fills the supplied
547 * structure with random data for you.
548 *
549 * This node was originally declared as type "int" but its implementation
550 * in OpenBSD, whence it came, would happily return up to 8K of data if
551 * requested. Evidently this was used to key RC4 in userspace.
552 *
553 * In NetBSD, the libc stack-smash-protection code reads 64 bytes
554 * from here at every program startup. Third-party software also often
555 * uses this to obtain a key for CSPRNG, reading 32 bytes or more, while
556 * avoiding the need to open /dev/urandom.
557 */
558 static int
559 sysctl_kern_arnd(SYSCTLFN_ARGS)
560 {
561 int error;
562 void *v;
563 struct sysctlnode node = *rnode;
564 size_t n __diagused;
565
566 switch (*oldlenp) {
567 case 0:
568 return 0;
569 default:
570 if (*oldlenp > 256) {
571 return E2BIG;
572 }
573 RUN_ONCE(&sysctl_prng_once, makeprng);
574 v = kmem_alloc(*oldlenp, KM_SLEEP);
575 n = cprng_strong(sysctl_prng, v, *oldlenp, 0);
576 KASSERT(n == *oldlenp);
577 node.sysctl_data = v;
578 node.sysctl_size = *oldlenp;
579 error = sysctl_lookup(SYSCTLFN_CALL(&node));
580 kmem_free(v, *oldlenp);
581 return error;
582 }
583 }
584