random.c revision 1.5
/*	$NetBSD: random.c,v 1.5 2020/05/08 15:55:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 *	For short reads from /dev/urandom, up to 256 bytes, read from a
 *	per-CPU NIST Hash_DRBG instance that is reseeded as soon as the
 *	system has enough entropy.
 *
 *	For all other reads, instantiate a fresh NIST Hash_DRBG from
 *	the global entropy pool, and draw from it.
 *
 *	Each read is independent; there is no per-open state.
 *	Concurrent reads from the same open run in parallel.
 *
 *	Reading from /dev/random may block until entropy is available.
 *	Either device may return short reads if interrupted.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.5 2020/05/08 15:55:05 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX pulled from arse */

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t	user_rndsource;

void
rndattach(int num)
{

	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}

static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}

/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;
	uint8_t *buf;
	int extractflags;
	bool interruptible;
	int error;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
		NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/*
	 * Generate data.  Assume no error until failure.  No
	 * interruption at this point until we've generated at least
	 * one block of output.
	 */
	error = 0;
	interruptible = false;
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/*
		 * Clamp /dev/random output to the entropy capacity and
		 * seed size.  Programs can't rely on long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/*
		 * Allow interruption, but only after providing a
		 * minimum number of bytes.
		 */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    interruptible && sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
				NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If this is /dev/random, stop here, return what we
		 * have, and force the next read to reseed.  Programs
		 * can't rely on /dev/random for long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/*
		 * We have generated one block of output, so it is
		 * reasonable to allow interruption after this point.
		 */
		interruptible = true;
	}

out:	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	return error;
}

/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth in one step.  */
		n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
		any = true;
	}

	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	/* If we added anything, consolidate entropy now.  */
	if (any)
		entropy_consolidate();

	return error;
}
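
The top-of-file comment promises that either device may return short reads when interrupted and that /dev/random clamps long reads, so userland callers should loop on read(2) rather than assume a single call fills the buffer. Below is a minimal userland sketch of that pattern, not part of this driver; it uses only standard POSIX calls, and the helper name read_urandom is illustrative.

/*
 * Userland usage sketch (illustrative, not part of random.c): read
 * exactly len bytes from /dev/urandom, retrying on EINTR and on the
 * short reads the driver is permitted to return.
 */
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

static int
read_urandom(void *buf, size_t len)
{
	unsigned char *p = buf;
	int fd;

	if ((fd = open("/dev/urandom", O_RDONLY)) == -1)
		return -1;
	while (len > 0) {
		ssize_t n = read(fd, p, len);

		if (n == -1 && errno == EINTR)
			continue;	/* interrupted: retry */
		if (n <= 0) {		/* error or unexpected EOF */
			(void)close(fd);
			return -1;
		}
		p += (size_t)n;
		len -= (size_t)n;
	}
	return close(fd);
}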