/*	$NetBSD: random.c,v 1.4 2020/05/08 15:53:26 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
301.1Sriastrad */ 311.1Sriastrad 321.1Sriastrad/* 331.1Sriastrad * /dev/random, /dev/urandom -- stateless version 341.1Sriastrad * 351.1Sriastrad * For short reads from /dev/urandom, up to 256 bytes, read from a 361.1Sriastrad * per-CPU NIST Hash_DRBG instance that is reseeded as soon as the 371.1Sriastrad * system has enough entropy. 381.1Sriastrad * 391.1Sriastrad * For all other reads, instantiate a fresh NIST Hash_DRBG from 401.1Sriastrad * the global entropy pool, and draw from it. 411.1Sriastrad * 421.1Sriastrad * Each read is independent; there is no per-open state. 431.1Sriastrad * Concurrent reads from the same open run in parallel. 441.1Sriastrad * 451.1Sriastrad * Reading from /dev/random may block until entropy is available. 461.1Sriastrad * Either device may return short reads if interrupted. 471.1Sriastrad */ 481.1Sriastrad 491.1Sriastrad#include <sys/cdefs.h> 501.4Sriastrad__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.4 2020/05/08 15:53:26 riastradh Exp $"); 511.1Sriastrad 521.1Sriastrad#include <sys/param.h> 531.1Sriastrad#include <sys/types.h> 541.2Sriastrad#include <sys/atomic.h> 551.1Sriastrad#include <sys/conf.h> 561.1Sriastrad#include <sys/cprng.h> 571.1Sriastrad#include <sys/entropy.h> 581.1Sriastrad#include <sys/errno.h> 591.1Sriastrad#include <sys/event.h> 601.1Sriastrad#include <sys/fcntl.h> 611.1Sriastrad#include <sys/kauth.h> 621.1Sriastrad#include <sys/lwp.h> 631.1Sriastrad#include <sys/poll.h> 641.1Sriastrad#include <sys/pool.h> 651.1Sriastrad#include <sys/rnd.h> 661.1Sriastrad#include <sys/rndsource.h> 671.1Sriastrad#include <sys/signalvar.h> 681.1Sriastrad#include <sys/systm.h> 691.1Sriastrad 701.1Sriastrad#include <crypto/nist_hash_drbg/nist_hash_drbg.h> 711.1Sriastrad 721.1Sriastrad#include "ioconf.h" 731.1Sriastrad 741.1Sriastradstatic dev_type_open(random_open); 751.1Sriastradstatic dev_type_close(random_close); 761.1Sriastradstatic dev_type_ioctl(random_ioctl); 771.1Sriastradstatic dev_type_poll(random_poll); 781.1Sriastradstatic 
dev_type_kqfilter(random_kqfilter); 791.1Sriastradstatic dev_type_read(random_read); 801.1Sriastradstatic dev_type_write(random_write); 811.1Sriastrad 821.1Sriastradconst struct cdevsw rnd_cdevsw = { 831.1Sriastrad .d_open = random_open, 841.1Sriastrad .d_close = random_close, 851.1Sriastrad .d_read = random_read, 861.1Sriastrad .d_write = random_write, 871.1Sriastrad .d_ioctl = random_ioctl, 881.1Sriastrad .d_stop = nostop, 891.1Sriastrad .d_tty = notty, 901.1Sriastrad .d_poll = random_poll, 911.1Sriastrad .d_mmap = nommap, 921.1Sriastrad .d_kqfilter = random_kqfilter, 931.1Sriastrad .d_discard = nodiscard, 941.1Sriastrad .d_flag = D_OTHER|D_MPSAFE, 951.1Sriastrad}; 961.1Sriastrad 971.1Sriastrad#define RANDOM_BUFSIZE 512 /* XXX pulled from arse */ 981.1Sriastradstatic pool_cache_t random_buf_pc __read_mostly; 991.1Sriastrad 1001.1Sriastrad/* Entropy source for writes to /dev/random and /dev/urandom */ 1011.1Sriastradstatic krndsource_t user_rndsource; 1021.1Sriastrad 1031.1Sriastradvoid 1041.1Sriastradrndattach(int num) 1051.1Sriastrad{ 1061.1Sriastrad 1071.1Sriastrad random_buf_pc = pool_cache_init(RANDOM_BUFSIZE, 0, 0, 0, 1081.1Sriastrad "randombuf", NULL, IPL_NONE, NULL, NULL, NULL); 1091.1Sriastrad rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN, 1101.1Sriastrad RND_FLAG_COLLECT_VALUE); 1111.1Sriastrad} 1121.1Sriastrad 1131.1Sriastradstatic int 1141.1Sriastradrandom_open(dev_t dev, int flags, int fmt, struct lwp *l) 1151.1Sriastrad{ 1161.1Sriastrad 1171.1Sriastrad /* Validate minor. */ 1181.1Sriastrad switch (minor(dev)) { 1191.1Sriastrad case RND_DEV_RANDOM: 1201.1Sriastrad case RND_DEV_URANDOM: 1211.1Sriastrad break; 1221.1Sriastrad default: 1231.1Sriastrad return ENXIO; 1241.1Sriastrad } 1251.1Sriastrad 1261.1Sriastrad return 0; 1271.1Sriastrad} 1281.1Sriastrad 1291.1Sriastradstatic int 1301.1Sriastradrandom_close(dev_t dev, int flags, int fmt, struct lwp *l) 1311.1Sriastrad{ 1321.1Sriastrad 1331.1Sriastrad /* Success! 
*/ 1341.1Sriastrad return 0; 1351.1Sriastrad} 1361.1Sriastrad 1371.1Sriastradstatic int 1381.1Sriastradrandom_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l) 1391.1Sriastrad{ 1401.1Sriastrad 1411.1Sriastrad /* 1421.1Sriastrad * No non-blocking/async options; otherwise defer to 1431.1Sriastrad * entropy_ioctl. 1441.1Sriastrad */ 1451.1Sriastrad switch (cmd) { 1461.1Sriastrad case FIONBIO: 1471.1Sriastrad case FIOASYNC: 1481.1Sriastrad return 0; 1491.1Sriastrad default: 1501.1Sriastrad return entropy_ioctl(cmd, data); 1511.1Sriastrad } 1521.1Sriastrad} 1531.1Sriastrad 1541.1Sriastradstatic int 1551.1Sriastradrandom_poll(dev_t dev, int events, struct lwp *l) 1561.1Sriastrad{ 1571.1Sriastrad 1581.1Sriastrad /* /dev/random may block; /dev/urandom is always ready. */ 1591.1Sriastrad switch (minor(dev)) { 1601.1Sriastrad case RND_DEV_RANDOM: 1611.1Sriastrad return entropy_poll(events); 1621.1Sriastrad case RND_DEV_URANDOM: 1631.1Sriastrad return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM); 1641.1Sriastrad default: 1651.1Sriastrad return 0; 1661.1Sriastrad } 1671.1Sriastrad} 1681.1Sriastrad 1691.1Sriastradstatic int 1701.1Sriastradrandom_kqfilter(dev_t dev, struct knote *kn) 1711.1Sriastrad{ 1721.1Sriastrad 1731.1Sriastrad /* Validate the event filter. */ 1741.1Sriastrad switch (kn->kn_filter) { 1751.1Sriastrad case EVFILT_READ: 1761.1Sriastrad case EVFILT_WRITE: 1771.1Sriastrad break; 1781.1Sriastrad default: 1791.1Sriastrad return EINVAL; 1801.1Sriastrad } 1811.1Sriastrad 1821.1Sriastrad /* /dev/random may block; /dev/urandom never does. 
*/ 1831.1Sriastrad switch (minor(dev)) { 1841.1Sriastrad case RND_DEV_RANDOM: 1851.1Sriastrad if (kn->kn_filter == EVFILT_READ) 1861.1Sriastrad return entropy_kqfilter(kn); 1871.1Sriastrad /* FALLTHROUGH */ 1881.1Sriastrad case RND_DEV_URANDOM: 1891.1Sriastrad kn->kn_fop = &seltrue_filtops; 1901.1Sriastrad return 0; 1911.1Sriastrad default: 1921.1Sriastrad return ENXIO; 1931.1Sriastrad } 1941.1Sriastrad} 1951.1Sriastrad 1961.1Sriastrad/* 1971.1Sriastrad * random_read(dev, uio, flags) 1981.1Sriastrad * 1991.1Sriastrad * Generate data from a PRNG seeded from the entropy pool. 2001.1Sriastrad * 2011.1Sriastrad * - If /dev/random, block until we have full entropy, or fail 2021.1Sriastrad * with EWOULDBLOCK, and if `depleting' entropy, return at most 2031.1Sriastrad * the entropy pool's capacity at once. 2041.1Sriastrad * 2051.1Sriastrad * - If /dev/urandom, generate data from whatever is in the 2061.1Sriastrad * entropy pool now. 2071.1Sriastrad * 2081.1Sriastrad * On interrupt, return a short read, but not shorter than 256 2091.1Sriastrad * bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is 2101.1Sriastrad * 512 for hysterical raisins). 2111.1Sriastrad */ 2121.1Sriastradstatic int 2131.1Sriastradrandom_read(dev_t dev, struct uio *uio, int flags) 2141.1Sriastrad{ 2151.1Sriastrad uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0}; 2161.1Sriastrad struct nist_hash_drbg drbg; 2171.1Sriastrad uint8_t *buf; 2181.1Sriastrad int extractflags; 2191.1Sriastrad bool interruptible; 2201.1Sriastrad int error; 2211.1Sriastrad 2221.1Sriastrad /* Get a buffer for transfers. */ 2231.1Sriastrad buf = pool_cache_get(random_buf_pc, PR_WAITOK); 2241.1Sriastrad 2251.1Sriastrad /* 2261.1Sriastrad * If it's a short read from /dev/urandom, just generate the 2271.1Sriastrad * output directly with per-CPU cprng_strong. 
2281.1Sriastrad */ 2291.1Sriastrad if (minor(dev) == RND_DEV_URANDOM && 2301.1Sriastrad uio->uio_resid <= RANDOM_BUFSIZE) { 2311.1Sriastrad /* Generate data and transfer it out. */ 2321.1Sriastrad cprng_strong(user_cprng, buf, uio->uio_resid, 0); 2331.1Sriastrad error = uiomove(buf, uio->uio_resid, uio); 2341.1Sriastrad goto out; 2351.1Sriastrad } 2361.1Sriastrad 2371.1Sriastrad /* 2381.1Sriastrad * If we're doing a blocking read from /dev/random, wait 2391.1Sriastrad * interruptibly. Otherwise, don't wait. 2401.1Sriastrad */ 2411.1Sriastrad if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK)) 2421.1Sriastrad extractflags = ENTROPY_WAIT|ENTROPY_SIG; 2431.1Sriastrad else 2441.1Sriastrad extractflags = 0; 2451.1Sriastrad 2461.1Sriastrad /* 2471.1Sriastrad * Query the entropy pool. For /dev/random, stop here if this 2481.1Sriastrad * fails. For /dev/urandom, go on either way -- 2491.1Sriastrad * entropy_extract will always fill the buffer with what we 2501.1Sriastrad * have from the global pool. 2511.1Sriastrad */ 2521.1Sriastrad error = entropy_extract(seed, sizeof seed, extractflags); 2531.1Sriastrad if (minor(dev) == RND_DEV_RANDOM && error) 2541.1Sriastrad goto out; 2551.1Sriastrad 2561.1Sriastrad /* Instantiate the DRBG. */ 2571.1Sriastrad if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0, 2581.1Sriastrad NULL, 0)) 2591.1Sriastrad panic("nist_hash_drbg_instantiate"); 2601.1Sriastrad 2611.1Sriastrad /* Promptly zero the seed. */ 2621.1Sriastrad explicit_memset(seed, 0, sizeof seed); 2631.1Sriastrad 2641.1Sriastrad /* 2651.1Sriastrad * Generate data. Assume no error until failure. No 2661.1Sriastrad * interruption at this point until we've generated at least 2671.1Sriastrad * one block of output. 2681.1Sriastrad */ 2691.1Sriastrad error = 0; 2701.1Sriastrad interruptible = false; 2711.1Sriastrad while (uio->uio_resid) { 2721.1Sriastrad size_t n = uio->uio_resid; 2731.1Sriastrad 2741.1Sriastrad /* No more than one buffer's worth. 
*/ 2751.1Sriastrad n = MIN(n, RANDOM_BUFSIZE); 2761.1Sriastrad 2771.1Sriastrad /* 2781.4Sriastrad * Clamp /dev/random output to the entropy capacity and 2791.4Sriastrad * seed size. Programs can't rely on long reads. 2801.1Sriastrad */ 2811.4Sriastrad if (minor(dev) == RND_DEV_RANDOM) { 2821.1Sriastrad n = MIN(n, ENTROPY_CAPACITY); 2831.1Sriastrad n = MIN(n, sizeof seed); 2841.1Sriastrad /* 2851.1Sriastrad * Guarantee never to return more than one 2861.1Sriastrad * buffer in this case to minimize bookkeeping. 2871.1Sriastrad */ 2881.1Sriastrad CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE); 2891.1Sriastrad CTASSERT(sizeof seed <= RANDOM_BUFSIZE); 2901.1Sriastrad } 2911.1Sriastrad 2921.1Sriastrad /* Yield if requested. */ 2931.1Sriastrad if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) 2941.1Sriastrad preempt(); 2951.1Sriastrad 2961.1Sriastrad /* 2971.1Sriastrad * Allow interruption, but only after providing a 2981.1Sriastrad * minimum number of bytes. 2991.1Sriastrad */ 3001.1Sriastrad CTASSERT(RANDOM_BUFSIZE >= 256); 3011.1Sriastrad /* Check for interruption. */ 3021.1Sriastrad if (__predict_false(curlwp->l_flag & LW_PENDSIG) && 3031.1Sriastrad interruptible && sigispending(curlwp, 0)) { 3041.1Sriastrad error = EINTR; /* XXX ERESTART? */ 3051.1Sriastrad break; 3061.1Sriastrad } 3071.1Sriastrad 3081.1Sriastrad /* 3091.1Sriastrad * Try to generate a block of data, but if we've hit 3101.1Sriastrad * the DRBG reseed interval, reseed. 3111.1Sriastrad */ 3121.1Sriastrad if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) { 3131.1Sriastrad /* 3141.1Sriastrad * Get a fresh seed without blocking -- we have 3151.1Sriastrad * already generated some output so it is not 3161.1Sriastrad * useful to block. This can fail only if the 3171.1Sriastrad * request is obscenely large, so it is OK for 3181.1Sriastrad * either /dev/random or /dev/urandom to fail: 3191.1Sriastrad * we make no promises about gigabyte-sized 3201.1Sriastrad * reads happening all at once. 
3211.1Sriastrad */ 3221.1Sriastrad error = entropy_extract(seed, sizeof seed, 0); 3231.1Sriastrad if (error) 3241.1Sriastrad break; 3251.1Sriastrad 3261.1Sriastrad /* Reseed and try again. */ 3271.1Sriastrad if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed, 3281.1Sriastrad NULL, 0)) 3291.1Sriastrad panic("nist_hash_drbg_reseed"); 3301.1Sriastrad 3311.1Sriastrad /* Promptly zero the seed. */ 3321.1Sriastrad explicit_memset(seed, 0, sizeof seed); 3331.1Sriastrad 3341.1Sriastrad /* If it fails now, that's a bug. */ 3351.1Sriastrad if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) 3361.1Sriastrad panic("nist_hash_drbg_generate"); 3371.1Sriastrad } 3381.1Sriastrad 3391.1Sriastrad /* Transfer n bytes out. */ 3401.1Sriastrad error = uiomove(buf, n, uio); 3411.1Sriastrad if (error) 3421.1Sriastrad break; 3431.1Sriastrad 3441.1Sriastrad /* 3451.4Sriastrad * If this is /dev/random, stop here, return what we 3461.4Sriastrad * have, and force the next read to reseed. Programs 3471.4Sriastrad * can't rely on /dev/random for long reads. 3481.1Sriastrad */ 3491.4Sriastrad if (minor(dev) == RND_DEV_RANDOM) { 3501.1Sriastrad error = 0; 3511.1Sriastrad break; 3521.1Sriastrad } 3531.1Sriastrad 3541.1Sriastrad /* 3551.1Sriastrad * We have generated one block of output, so it is 3561.1Sriastrad * reasonable to allow interruption after this point. 3571.1Sriastrad */ 3581.1Sriastrad interruptible = true; 3591.1Sriastrad } 3601.1Sriastrad 3611.1Sriastradout: /* Zero the buffer and return it to the pool cache. */ 3621.1Sriastrad explicit_memset(buf, 0, RANDOM_BUFSIZE); 3631.1Sriastrad pool_cache_put(random_buf_pc, buf); 3641.1Sriastrad 3651.1Sriastrad return error; 3661.1Sriastrad} 3671.1Sriastrad 3681.1Sriastrad/* 3691.1Sriastrad * random_write(dev, uio, flags) 3701.1Sriastrad * 3711.1Sriastrad * Enter data from uio into the entropy pool. 3721.1Sriastrad * 3731.1Sriastrad * Assume privileged users provide full entropy, and unprivileged 3741.1Sriastrad * users provide no entropy. 
If you have a nonuniform source of 3751.1Sriastrad * data with n bytes of min-entropy, hash it with an XOF like 3761.1Sriastrad * SHAKE128 into exactly n bytes first. 3771.1Sriastrad */ 3781.1Sriastradstatic int 3791.1Sriastradrandom_write(dev_t dev, struct uio *uio, int flags) 3801.1Sriastrad{ 3811.1Sriastrad kauth_cred_t cred = kauth_cred_get(); 3821.1Sriastrad uint8_t *buf; 3831.3Sriastrad bool privileged = false, any = false; 3841.1Sriastrad int error = 0; 3851.1Sriastrad 3861.1Sriastrad /* Verify user's authorization to affect the entropy pool. */ 3871.1Sriastrad error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA, 3881.1Sriastrad NULL, NULL, NULL, NULL); 3891.1Sriastrad if (error) 3901.1Sriastrad return error; 3911.1Sriastrad 3921.1Sriastrad /* 3931.1Sriastrad * Check whether user is privileged. If so, assume user 3941.1Sriastrad * furnishes full-entropy data; if not, accept user's data but 3951.1Sriastrad * assume it has zero entropy when we do accounting. If you 3961.1Sriastrad * want to specify less entropy, use ioctl(RNDADDDATA). 3971.1Sriastrad */ 3981.1Sriastrad if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, 3991.1Sriastrad NULL, NULL, NULL, NULL) == 0) 4001.1Sriastrad privileged = true; 4011.1Sriastrad 4021.1Sriastrad /* Get a buffer for transfers. */ 4031.1Sriastrad buf = pool_cache_get(random_buf_pc, PR_WAITOK); 4041.1Sriastrad 4051.1Sriastrad /* Consume data. */ 4061.1Sriastrad while (uio->uio_resid) { 4071.1Sriastrad size_t n = uio->uio_resid; 4081.1Sriastrad 4091.1Sriastrad /* No more than one buffer's worth in one step. */ 4101.1Sriastrad n = MIN(uio->uio_resid, RANDOM_BUFSIZE); 4111.1Sriastrad 4121.1Sriastrad /* Yield if requested. */ 4131.1Sriastrad if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) 4141.1Sriastrad preempt(); 4151.1Sriastrad 4161.1Sriastrad /* Check for interruption. 
*/ 4171.1Sriastrad if (__predict_false(curlwp->l_flag & LW_PENDSIG) && 4181.1Sriastrad sigispending(curlwp, 0)) { 4191.1Sriastrad error = EINTR; /* XXX ERESTART? */ 4201.1Sriastrad break; 4211.1Sriastrad } 4221.1Sriastrad 4231.1Sriastrad /* Transfer n bytes in and enter them into the pool. */ 4241.1Sriastrad error = uiomove(buf, n, uio); 4251.1Sriastrad if (error) 4261.1Sriastrad break; 4271.1Sriastrad rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0); 4281.3Sriastrad any = true; 4291.1Sriastrad } 4301.1Sriastrad 4311.1Sriastrad /* Zero the buffer and return it to the pool cache. */ 4321.1Sriastrad explicit_memset(buf, 0, RANDOM_BUFSIZE); 4331.1Sriastrad pool_cache_put(random_buf_pc, buf); 4341.3Sriastrad 4351.3Sriastrad /* If we added anything, consolidate entropy now. */ 4361.3Sriastrad if (any) 4371.3Sriastrad entropy_consolidate(); 4381.3Sriastrad 4391.1Sriastrad return error; 4401.1Sriastrad} 441