/*	$NetBSD: random.c,v 1.7 2020/05/08 16:05:36 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 *	For short reads from /dev/urandom, up to 256 bytes, read from a
 *	per-CPU NIST Hash_DRBG instance that is reseeded as soon as the
 *	system has enough entropy.
 *
 *	For all other reads, instantiate a fresh NIST Hash_DRBG from
 *	the global entropy pool, and draw from it.
 *
 *	Each read is independent; there is no per-open state.
 *	Concurrent reads from the same open run in parallel.
 *
 *	Reading from /dev/random may block until entropy is available.
 *	Either device may return short reads if interrupted.
 */
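/*
 * Example: a minimal userland sketch of reading from /dev/urandom,
 * looping to cope with the short reads noted above.  Illustrative
 * only, not part of this driver; the helper name fill_random is
 * made up.
 *
 *	#include <sys/types.h>
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int
 *	fill_random(void *buf, size_t len)
 *	{
 *		char *p = buf;
 *		size_t off = 0;
 *		ssize_t n;
 *		int fd;
 *
 *		if ((fd = open("/dev/urandom", O_RDONLY)) == -1)
 *			return -1;
 *		while (off < len) {
 *			n = read(fd, p + off, len - off);
 *			if (n == -1) {
 *				if (errno == EINTR)
 *					continue;
 *				close(fd);
 *				return -1;
 *			}
 *			off += (size_t)n;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */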

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.7 2020/05/08 16:05:36 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX pulled from arse */

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t	user_rndsource;

void
rndattach(int num)
{

	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}

static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}

/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;
	uint8_t *buf;
	int extractflags;
	int error;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
		NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/* Generate data.  */
	error = 0;
	while (uio->uio_resid) {
		size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/*
		 * Clamp /dev/random output to the entropy capacity and
		 * seed size.  Programs can't rely on long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
				NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If this is /dev/random, stop here, return what we
		 * have, and force the next read to reseed.  Programs
		 * can't rely on /dev/random for long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption after at least 256 bytes.  */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR;
			break;
		}
	}

out:	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	return error;
}
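
/*
 * Example: a minimal userland sketch of a non-blocking read from
 * /dev/random, waiting with poll(2) for full entropy as described
 * in the comment above random_read.  Illustrative only, not part of
 * this driver; the helper name read_seed is made up, and a single
 * read returns at most a seed's worth of data.
 *
 *	#include <sys/types.h>
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	static ssize_t
 *	read_seed(void *buf, size_t len)
 *	{
 *		struct pollfd pfd;
 *		ssize_t n;
 *
 *		pfd.fd = open("/dev/random", O_RDONLY|O_NONBLOCK);
 *		if (pfd.fd == -1)
 *			return -1;
 *		pfd.events = POLLIN;
 *		for (;;) {
 *			n = read(pfd.fd, buf, len);
 *			if (n != -1 || errno != EWOULDBLOCK)
 *				break;
 *			if (poll(&pfd, 1, INFTIM) == -1 && errno != EINTR)
 *				break;
 *		}
 *		close(pfd.fd);
 *		return n;
 *	}
 */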

/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
		any = true;

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR;
			break;
		}
	}

	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	/* If we added anything, consolidate entropy now.  */
	if (any)
		entropy_consolidate();

	return error;
}
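
/*
 * Example: a minimal userland sketch of feeding data into the
 * entropy pool by writing to /dev/random.  Per the comment above
 * random_write, a privileged writer is credited with full entropy
 * and an unprivileged one with none, so nonuniform input should be
 * condensed with an XOF first.  Illustrative only, not part of this
 * driver; the helper name seed_pool is made up.
 *
 *	#include <sys/types.h>
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int
 *	seed_pool(const void *buf, size_t len)
 *	{
 *		const char *p = buf;
 *		size_t off = 0;
 *		ssize_t n;
 *		int fd;
 *
 *		if ((fd = open("/dev/random", O_WRONLY)) == -1)
 *			return -1;
 *		while (off < len) {
 *			n = write(fd, p + off, len - off);
 *			if (n == -1) {
 *				close(fd);
 *				return -1;
 *			}
 *			off += (size_t)n;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */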