random.c revision 1.4
/*	$NetBSD: random.c,v 1.4 2020/05/08 15:53:26 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 * For short reads from /dev/urandom, up to 256 bytes, read from a
 * per-CPU NIST Hash_DRBG instance that is reseeded as soon as the
 * system has enough entropy.
 *
 * For all other reads, instantiate a fresh NIST Hash_DRBG from the
 * global entropy pool, and draw from it.
 *
 * Each read is independent; there is no per-open state.  Concurrent
 * reads from the same open run in parallel.
 *
 * Reading from /dev/random may block until entropy is available.
 * Either device may return short reads if interrupted.
 */
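
/*
 * Illustrative userland usage (not part of this driver): a minimal
 * sketch, assuming only POSIX open/read/close, of how a program might
 * consume /dev/urandom while coping with the short reads and EINTR
 * behavior noted above.  The helper name read_random_bytes is
 * hypothetical.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

static int
read_random_bytes(void *buf, size_t len)
{
	char *p = buf;
	ssize_t n;
	int fd;

	if ((fd = open("/dev/urandom", O_RDONLY)) == -1)
		return -1;
	while (len > 0) {
		n = read(fd, p, len);
		if (n == -1) {
			if (errno == EINTR)	/* interrupted: retry */
				continue;
			break;
		}
		if (n == 0)		/* can't happen, but be safe */
			break;
		p += n;			/* short read: keep going */
		len -= n;
	}
	close(fd);
	return len == 0 ? 0 : -1;
}
#endif
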
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.4 2020/05/08 15:53:26 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/pool.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX pulled from arse */
static pool_cache_t random_buf_pc __read_mostly;

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t user_rndsource;

void
rndattach(int num)
{

	random_buf_pc = pool_cache_init(RANDOM_BUFSIZE, 0, 0, 0,
	    "randombuf", NULL, IPL_NONE, NULL, NULL, NULL);
	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}

static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}
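
/*
 * Illustrative userland usage (not part of this driver): a sketch,
 * assuming only POSIX poll/read and NetBSD's INFTIM, of waiting for
 * /dev/random to signal that entropy is available before reading a
 * seed.  The helper name read_seed_when_ready is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static ssize_t
read_seed_when_ready(void *buf, size_t len)
{
	struct pollfd pfd;
	ssize_t n;
	int fd;

	if ((fd = open("/dev/random", O_RDONLY|O_NONBLOCK)) == -1)
		return -1;
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, INFTIM) == -1) {	/* block until ready */
		close(fd);
		return -1;
	}
	n = read(fd, buf, len);		/* may still be short */
	close(fd);
	return n;
}
#endif
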
/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which
 *	is 512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;
	uint8_t *buf;
	int extractflags;
	bool interruptible;
	int error;

	/* Get a buffer for transfers.  */
	buf = pool_cache_get(random_buf_pc, PR_WAITOK);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
	    NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/*
	 * Generate data.  Assume no error until failure.  No
	 * interruption at this point until we've generated at least
	 * one block of output.
	 */
	error = 0;
	interruptible = false;
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/*
		 * Clamp /dev/random output to the entropy capacity and
		 * seed size.  Programs can't rely on long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/*
		 * Allow interruption, but only after providing a
		 * minimum number of bytes.
		 */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    interruptible && sigispending(curlwp, 0)) {
			error = EINTR;	/* XXX ERESTART?  */
			break;
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
			    NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If this is /dev/random, stop here, return what we
		 * have, and force the next read to reseed.  Programs
		 * can't rely on /dev/random for long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/*
		 * We have generated one block of output, so it is
		 * reasonable to allow interruption after this point.
		 */
		interruptible = true;
	}

out:	/* Zero the buffer and return it to the pool cache.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	pool_cache_put(random_buf_pc, buf);

	return error;
}

/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
	    NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = pool_cache_get(random_buf_pc, PR_WAITOK);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth in one step.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR;	/* XXX ERESTART?  */
			break;
		}

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		rnd_add_data(&user_rndsource, buf, n,
		    privileged ? n*NBBY : 0);
		any = true;
	}

	/* Zero the buffer and return it to the pool cache.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	pool_cache_put(random_buf_pc, buf);

	/* If we added anything, consolidate entropy now.  */
	if (any)
		entropy_consolidate();

	return error;
}
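
/*
 * Illustrative userland usage (not part of this driver): a sketch of
 * contributing seed material by writing to /dev/urandom, following the
 * advice above to condition a nonuniform sample buffer through
 * SHAKE128 first.  Assumes OpenSSL >= 1.1.1 for the XOF; the helper
 * name contribute_entropy is hypothetical.  Unprivileged writes are
 * accepted but counted as zero entropy; root's writes count in full.
 */
#if 0
#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

#include <openssl/evp.h>

static int
contribute_entropy(const void *samples, size_t samplelen, size_t nbytes)
{
	unsigned char out[64];
	EVP_MD_CTX *ctx;
	int fd, ok = -1;

	/* nbytes is the caller's min-entropy estimate, in bytes.  */
	if (nbytes > sizeof out)
		return -1;

	/* Condense the raw samples into exactly nbytes via SHAKE128.  */
	if ((ctx = EVP_MD_CTX_new()) == NULL)
		return -1;
	if (EVP_DigestInit_ex(ctx, EVP_shake128(), NULL) &&
	    EVP_DigestUpdate(ctx, samples, samplelen) &&
	    EVP_DigestFinalXOF(ctx, out, nbytes)) {
		if ((fd = open("/dev/urandom", O_WRONLY)) != -1) {
			if (write(fd, out, nbytes) == (ssize_t)nbytes)
				ok = 0;
			close(fd);
		}
	}
	EVP_MD_CTX_free(ctx);
	return ok;
}
#endif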