/*	$NetBSD: random.c,v 1.3 2020/05/07 19:05:51 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 *	For short reads from /dev/urandom, up to 256 bytes, read from a
 *	per-CPU NIST Hash_DRBG instance that is reseeded as soon as the
 *	system has enough entropy.
 *
 *	For all other reads, instantiate a fresh NIST Hash_DRBG from
 *	the global entropy pool, and draw from it.
 *
 *	Each read is independent; there is no per-open state.
 *	Concurrent reads from the same open run in parallel.
 *
 *	Reading from /dev/random may block until entropy is available.
 *	Either device may return short reads if interrupted.
 */
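/*
 * Illustrative userland usage, not part of this driver (assumes
 * <err.h>, <fcntl.h>, <unistd.h>): a read of up to 256 bytes from
 * /dev/urandom is served by the fast per-CPU DRBG path described
 * above.
 *
 *	uint8_t key[32];
 *	int fd;
 *
 *	if ((fd = open("/dev/urandom", O_RDONLY)) == -1)
 *		err(1, "open");
 *	if (read(fd, key, sizeof key) != (ssize_t)sizeof key)
 *		err(1, "read");
 *	close(fd);
 */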
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.3 2020/05/07 19:05:51 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/pool.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX pulled from arse */
static pool_cache_t random_buf_pc __read_mostly;

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t user_rndsource;

void
rndattach(int num)
{

	random_buf_pc = pool_cache_init(RANDOM_BUFSIZE, 0, 0, 0,
	    "randombuf", NULL, IPL_NONE, NULL, NULL, NULL);
	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}

static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}
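/*
 * Illustrative userland sketch, not part of this driver (assumes
 * <poll.h> and a descriptor fd already open on /dev/random): wait
 * until /dev/random reports readable -- i.e. until the system has
 * full entropy -- before issuing a read that would otherwise block.
 *
 *	char buf[64];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	ssize_t n;
 *
 *	if (poll(&pfd, 1, INFTIM) == -1)
 *		err(1, "poll");
 *	n = read(fd, buf, sizeof buf);
 */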
/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;
	uint8_t *buf;
	int extractflags;
	bool interruptible;
	int error;

	/* Get a buffer for transfers.  */
	buf = pool_cache_get(random_buf_pc, PR_WAITOK);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
		NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/*
	 * Generate data.  Assume no error until failure.  No
	 * interruption at this point until we've generated at least
	 * one block of output.
	 */
	error = 0;
	interruptible = false;
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/*
		 * If we're `depleting' and this is /dev/random, clamp
		 * to the smaller of the entropy capacity or the seed.
		 */
		if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
		    minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/*
		 * Allow interruption, but only after providing a
		 * minimum number of bytes.
		 */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    interruptible && sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
				NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If we're `depleting' and this is /dev/random, stop
		 * here, return what we have, and force the next read
		 * to reseed.  Could grab more from the pool if
		 * possible without blocking, but that's more work.
		 */
		if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
		    minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/*
		 * We have generated one block of output, so it is
		 * reasonable to allow interruption after this point.
		 */
		interruptible = true;
	}

out:	/* Zero the buffer and return it to the pool cache.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	pool_cache_put(random_buf_pc, buf);

	return error;
}
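/*
 * Illustrative userland sketch, not part of this driver: with
 * O_NONBLOCK set, a read from /dev/random fails with EWOULDBLOCK
 * (equal to EAGAIN on NetBSD) instead of sleeping when the system
 * does not yet have full entropy.
 *
 *	uint8_t buf[64];
 *	int fd = open("/dev/random", O_RDONLY|O_NONBLOCK);
 *	ssize_t n = read(fd, buf, sizeof buf);
 *
 *	if (n == -1 && errno == EAGAIN)
 *		...			no entropy yet; poll(2) and retry
 */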
/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = pool_cache_get(random_buf_pc, PR_WAITOK);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth in one step.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
		any = true;
	}

	/* Zero the buffer and return it to the pool cache.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	pool_cache_put(random_buf_pc, buf);

	/* If we added anything, consolidate entropy now.  */
	if (any)
		entropy_consolidate();

	return error;
}
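/*
 * Illustrative userland sketch, not part of this driver: the
 * conditioning step suggested in the random_write comment above.
 * Suppose `sample' holds 1000 bytes of nonuniform sensor data
 * believed to carry about 32 bytes of min-entropy.  Hash it with an
 * XOF into exactly 32 bytes before writing, so the kernel's
 * full-entropy assumption for privileged writers is justified.  The
 * SHAKE128_* names here are assumptions standing in for whatever XOF
 * implementation is at hand, not an API provided by this driver; fd
 * is assumed open for writing on /dev/random.
 *
 *	uint8_t conditioned[32];
 *	SHAKE128_CTX shake;
 *
 *	SHAKE128_Init(&shake);
 *	SHAKE128_Update(&shake, sample, sizeof sample);
 *	SHAKE128_Final(conditioned, sizeof conditioned, &shake);
 *	if (write(fd, conditioned, sizeof conditioned) == -1)
 *		err(1, "write");
 */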