/*	$NetBSD: random.c,v 1.1 2020/04/30 03:28:18 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 *	For short reads from /dev/urandom, up to RANDOM_BUFSIZE (512)
 *	bytes, read from a per-CPU NIST Hash_DRBG instance that is
 *	reseeded as soon as the system has enough entropy.
 *
 *	For all other reads, instantiate a fresh NIST Hash_DRBG from
 *	the global entropy pool, and draw from it.
 *
 *	Each read is independent; there is no per-open state.
 *	Concurrent reads from the same open run in parallel.
 *
 *	Reading from /dev/random may block until entropy is available.
 *	Either device may return short reads if interrupted.
 */
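
/*
 * Illustrative userland usage (not part of this driver): most
 * consumers just pull key material from /dev/urandom and treat any
 * failure or short read as fatal.  A minimal sketch only -- names and
 * error handling are the caller's choice:
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	unsigned char key[32];
 *	int fd = open("/dev/urandom", O_RDONLY);
 *	if (fd == -1)
 *		err(1, "open /dev/urandom");
 *	if (read(fd, key, sizeof key) != (ssize_t)sizeof key)
 *		err(1, "read /dev/urandom");
 *	close(fd);
 */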

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.1 2020/04/30 03:28:18 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/pool.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX arbitrary size */
static pool_cache_t random_buf_pc __read_mostly;

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t	user_rndsource;

void
rndattach(int num)
{

	random_buf_pc = pool_cache_init(RANDOM_BUFSIZE, 0, 0, 0,
	    "randombuf", NULL, IPL_NONE, NULL, NULL, NULL);
	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}

static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}

/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for historical reasons).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;
	uint8_t *buf;
	int extractflags;
	bool interruptible;
	int error;

	/* Get a buffer for transfers.  */
	buf = pool_cache_get(random_buf_pc, PR_WAITOK);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
		NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/*
	 * Generate data.  Assume no error until failure.  No
	 * interruption at this point until we've generated at least
	 * one block of output.
	 */
	error = 0;
	interruptible = false;
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/*
		 * If we're `depleting' and this is /dev/random, clamp
		 * to the smaller of the entropy capacity or the seed.
		 */
		if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
		    minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/*
		 * Allow interruption, but only after providing a
		 * minimum number of bytes.
		 */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    interruptible && sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
				NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If we're `depleting' and this is /dev/random, stop
		 * here, return what we have, and force the next read
		 * to reseed.  Could grab more from the pool if
		 * possible without blocking, but that's more
		 * work.
		 */
		if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
		    minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/*
		 * We have generated one block of output, so it is
		 * reasonable to allow interruption after this point.
		 */
		interruptible = true;
	}

out:	/* Zero the buffer and return it to the pool cache.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	pool_cache_put(random_buf_pc, buf);

	return error;
}
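
/*
 * Illustrative userland usage (not part of this driver): with
 * O_NONBLOCK, a read from /dev/random fails with EAGAIN/EWOULDBLOCK
 * until the system has full entropy, and any read may be interrupted
 * by a signal.  A minimal sketch only, using <err.h>, <errno.h>,
 * <fcntl.h>, and <unistd.h>; the retry policy is up to the caller:
 *
 *	unsigned char buf[64];
 *	int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
 *	ssize_t k = read(fd, buf, sizeof buf);
 *	if (k == -1 && errno == EAGAIN)
 *		warnx("entropy not ready; poll(2) for POLLIN and retry");
 *	else if (k == -1 && errno == EINTR)
 *		warnx("interrupted; retry");
 */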

/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = pool_cache_get(random_buf_pc, PR_WAITOK);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth in one step.  */
		n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART?  */
			break;
		}

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
	}

	/* Zero the buffer and return it to the pool cache.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	pool_cache_put(random_buf_pc, buf);
	return error;
}
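
/*
 * Illustrative conditioning step for the rule above (not part of this
 * driver): a userland program with a nonuniform sample carrying n
 * bytes of min-entropy can hash it into exactly n bytes with an XOF
 * before writing.  A sketch only, assuming OpenSSL 1.1.1's EVP
 * interface to SHAKE128, n = 16, `sample'/`samplelen' standing in for
 * the raw input, and fd open on /dev/random by a privileged user:
 *
 *	#include <openssl/evp.h>
 *
 *	unsigned char cond[16];
 *	EVP_MD_CTX *ctx = EVP_MD_CTX_new();
 *	EVP_DigestInit_ex(ctx, EVP_shake128(), NULL);
 *	EVP_DigestUpdate(ctx, sample, samplelen);
 *	EVP_DigestFinalXOF(ctx, cond, sizeof cond);
 *	EVP_MD_CTX_free(ctx);
 *	write(fd, cond, sizeof cond);
 */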