/* random.c revision 1.5 */
/*	$NetBSD: random.c,v 1.5 2020/05/08 15:55:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 *	For short reads from /dev/urandom, up to RANDOM_BUFSIZE (512)
 *	bytes, read from a per-CPU NIST Hash_DRBG instance that is
 *	reseeded as soon as the system has enough entropy.
 *
 *	For all other reads, instantiate a fresh NIST Hash_DRBG from
 *	the global entropy pool, and draw from it.
 *
 *	Each read is independent; there is no per-open state.
 *	Concurrent reads from the same open run in parallel.
 *
 *	Reading from /dev/random may block until entropy is available.
 *	Either device may return short reads if interrupted.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.5 2020/05/08 15:55:05 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

/* Character device switch shared by /dev/random and /dev/urandom. */
const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX pulled from arse */

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t	user_rndsource;

/*
 * rndattach(num)
 *
 *	Pseudo-device attach routine: register the entropy source that
 *	is credited by user writes to either device node.  `num' is
 *	ignored.
 */
void
rndattach(int num)
{

	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

/*
 * random_open(dev, flags, fmt, l)
 *
 *	Reject opens of unknown minors.  Reads are stateless, so there
 *	is no per-open state to set up.
 */
static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

/*
 * random_close(dev, flags, fmt, l)
 *
 *	Nothing to tear down -- there is no per-open state.
 */
static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success! */
	return 0;
}

/*
 * random_ioctl(dev, cmd, data, flag, l)
 *
 *	Accept (and ignore) the non-blocking/async options; defer
 *	everything else to entropy_ioctl.
 */
static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

/*
 * random_poll(dev, events, l)
 *
 *	Poll support for both device nodes.
 */
static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

/*
 * random_kqfilter(dev, kn)
 *
 *	Kqueue support: reads of /dev/random use the entropy subsystem's
 *	filter; everything else is always ready (seltrue).
 */
static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}

/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;
	uint8_t *buf;
	int extractflags;
	bool interruptible;
	int error;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
		NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/*
	 * Generate data.  Assume no error until failure.  No
	 * interruption at this point until we've generated at least
	 * one block of output.
	 */
	error = 0;
	interruptible = false;
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/*
		 * Clamp /dev/random output to the entropy capacity and
		 * seed size.  Programs can't rely on long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/*
		 * Allow interruption, but only after providing a
		 * minimum number of bytes.
		 */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    interruptible && sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
				NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If this is /dev/random, stop here, return what we
		 * have, and force the next read to reseed.  Programs
		 * can't rely on /dev/random for long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/*
		 * We have generated one block of output, so it is
		 * reasonable to allow interruption after this point.
		 */
		interruptible = true;
	}

out:	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	return error;
}

/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth in one step.  */
		n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		/* Credit n*NBBY bits (full entropy) only if privileged.  */
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
		any = true;
	}

	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	/* If we added anything, consolidate entropy now.  */
	if (any)
		entropy_consolidate();

	return error;
}