random.c revision 1.1
11.1Sriastrad/*	$NetBSD: random.c,v 1.1 2020/04/30 03:28:18 riastradh Exp $	*/
21.1Sriastrad
31.1Sriastrad/*-
41.1Sriastrad * Copyright (c) 2019 The NetBSD Foundation, Inc.
51.1Sriastrad * All rights reserved.
61.1Sriastrad *
71.1Sriastrad * This code is derived from software contributed to The NetBSD Foundation
81.1Sriastrad * by Taylor R. Campbell.
91.1Sriastrad *
101.1Sriastrad * Redistribution and use in source and binary forms, with or without
111.1Sriastrad * modification, are permitted provided that the following conditions
121.1Sriastrad * are met:
131.1Sriastrad * 1. Redistributions of source code must retain the above copyright
141.1Sriastrad *    notice, this list of conditions and the following disclaimer.
151.1Sriastrad * 2. Redistributions in binary form must reproduce the above copyright
161.1Sriastrad *    notice, this list of conditions and the following disclaimer in the
171.1Sriastrad *    documentation and/or other materials provided with the distribution.
181.1Sriastrad *
191.1Sriastrad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
201.1Sriastrad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
211.1Sriastrad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
221.1Sriastrad * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
231.1Sriastrad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
241.1Sriastrad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
251.1Sriastrad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
261.1Sriastrad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
271.1Sriastrad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
281.1Sriastrad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
291.1Sriastrad * POSSIBILITY OF SUCH DAMAGE.
301.1Sriastrad */
311.1Sriastrad
321.1Sriastrad/*
331.1Sriastrad * /dev/random, /dev/urandom -- stateless version
341.1Sriastrad *
351.1Sriastrad *	For short reads from /dev/urandom, up to 256 bytes, read from a
361.1Sriastrad *	per-CPU NIST Hash_DRBG instance that is reseeded as soon as the
371.1Sriastrad *	system has enough entropy.
381.1Sriastrad *
391.1Sriastrad *	For all other reads, instantiate a fresh NIST Hash_DRBG from
401.1Sriastrad *	the global entropy pool, and draw from it.
411.1Sriastrad *
421.1Sriastrad *	Each read is independent; there is no per-open state.
431.1Sriastrad *	Concurrent reads from the same open run in parallel.
441.1Sriastrad *
451.1Sriastrad *	Reading from /dev/random may block until entropy is available.
461.1Sriastrad *	Either device may return short reads if interrupted.
471.1Sriastrad */
481.1Sriastrad
491.1Sriastrad#include <sys/cdefs.h>
501.1Sriastrad__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.1 2020/04/30 03:28:18 riastradh Exp $");
511.1Sriastrad
521.1Sriastrad#include <sys/param.h>
531.1Sriastrad#include <sys/types.h>
541.1Sriastrad#include <sys/conf.h>
551.1Sriastrad#include <sys/cprng.h>
561.1Sriastrad#include <sys/entropy.h>
571.1Sriastrad#include <sys/errno.h>
581.1Sriastrad#include <sys/event.h>
591.1Sriastrad#include <sys/fcntl.h>
601.1Sriastrad#include <sys/kauth.h>
611.1Sriastrad#include <sys/lwp.h>
621.1Sriastrad#include <sys/poll.h>
631.1Sriastrad#include <sys/pool.h>
641.1Sriastrad#include <sys/rnd.h>
651.1Sriastrad#include <sys/rndsource.h>
661.1Sriastrad#include <sys/signalvar.h>
671.1Sriastrad#include <sys/systm.h>
681.1Sriastrad
691.1Sriastrad#include <crypto/nist_hash_drbg/nist_hash_drbg.h>
701.1Sriastrad
711.1Sriastrad#include "ioconf.h"
721.1Sriastrad
/* Forward declarations of the character-device entry points below.  */
static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

/*
 * Character-device switch for /dev/random and /dev/urandom.  The two
 * devices share one cdevsw and are distinguished by minor number
 * (RND_DEV_RANDOM/RND_DEV_URANDOM).  D_MPSAFE: the entry points take
 * no global locks; reads run in parallel.
 */
const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

/* Size of the per-transfer staging buffer.  */
#define	RANDOM_BUFSIZE	512	/* XXX pulled from arse */
/* Pool cache of RANDOM_BUFSIZE-byte transfer buffers.  */
static pool_cache_t random_buf_pc __read_mostly;

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t	user_rndsource;
1011.1Sriastrad
/*
 * rndattach(num)
 *
 *	Pseudo-device attach hook (called once at boot via ioconf).
 *	Sets up the transfer-buffer pool cache and registers the
 *	rndsource that credits entropy for user writes.  `num' is the
 *	configured instance count; it is unused here.
 */
void
rndattach(int num)
{

	random_buf_pc = pool_cache_init(RANDOM_BUFSIZE, 0, 0, 0,
	    "randombuf", NULL, IPL_NONE, NULL, NULL, NULL);
	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}
1111.1Sriastrad
1121.1Sriastradstatic int
1131.1Sriastradrandom_open(dev_t dev, int flags, int fmt, struct lwp *l)
1141.1Sriastrad{
1151.1Sriastrad
1161.1Sriastrad	/* Validate minor.  */
1171.1Sriastrad	switch (minor(dev)) {
1181.1Sriastrad	case RND_DEV_RANDOM:
1191.1Sriastrad	case RND_DEV_URANDOM:
1201.1Sriastrad		break;
1211.1Sriastrad	default:
1221.1Sriastrad		return ENXIO;
1231.1Sriastrad	}
1241.1Sriastrad
1251.1Sriastrad	return 0;
1261.1Sriastrad}
1271.1Sriastrad
/*
 * random_close(dev, flags, fmt, l)
 *
 *	Close hook.  No per-open state to tear down, so this always
 *	succeeds.
 */
static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}
1351.1Sriastrad
1361.1Sriastradstatic int
1371.1Sriastradrandom_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
1381.1Sriastrad{
1391.1Sriastrad
1401.1Sriastrad	/*
1411.1Sriastrad	 * No non-blocking/async options; otherwise defer to
1421.1Sriastrad	 * entropy_ioctl.
1431.1Sriastrad	 */
1441.1Sriastrad	switch (cmd) {
1451.1Sriastrad	case FIONBIO:
1461.1Sriastrad	case FIOASYNC:
1471.1Sriastrad		return 0;
1481.1Sriastrad	default:
1491.1Sriastrad		return entropy_ioctl(cmd, data);
1501.1Sriastrad	}
1511.1Sriastrad}
1521.1Sriastrad
1531.1Sriastradstatic int
1541.1Sriastradrandom_poll(dev_t dev, int events, struct lwp *l)
1551.1Sriastrad{
1561.1Sriastrad
1571.1Sriastrad	/* /dev/random may block; /dev/urandom is always ready.  */
1581.1Sriastrad	switch (minor(dev)) {
1591.1Sriastrad	case RND_DEV_RANDOM:
1601.1Sriastrad		return entropy_poll(events);
1611.1Sriastrad	case RND_DEV_URANDOM:
1621.1Sriastrad		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
1631.1Sriastrad	default:
1641.1Sriastrad		return 0;
1651.1Sriastrad	}
1661.1Sriastrad}
1671.1Sriastrad
1681.1Sriastradstatic int
1691.1Sriastradrandom_kqfilter(dev_t dev, struct knote *kn)
1701.1Sriastrad{
1711.1Sriastrad
1721.1Sriastrad	/* Validate the event filter.  */
1731.1Sriastrad	switch (kn->kn_filter) {
1741.1Sriastrad	case EVFILT_READ:
1751.1Sriastrad	case EVFILT_WRITE:
1761.1Sriastrad		break;
1771.1Sriastrad	default:
1781.1Sriastrad		return EINVAL;
1791.1Sriastrad	}
1801.1Sriastrad
1811.1Sriastrad	/* /dev/random may block; /dev/urandom never does.  */
1821.1Sriastrad	switch (minor(dev)) {
1831.1Sriastrad	case RND_DEV_RANDOM:
1841.1Sriastrad		if (kn->kn_filter == EVFILT_READ)
1851.1Sriastrad			return entropy_kqfilter(kn);
1861.1Sriastrad		/* FALLTHROUGH */
1871.1Sriastrad	case RND_DEV_URANDOM:
1881.1Sriastrad		kn->kn_fop = &seltrue_filtops;
1891.1Sriastrad		return 0;
1901.1Sriastrad	default:
1911.1Sriastrad		return ENXIO;
1921.1Sriastrad	}
1931.1Sriastrad}
1941.1Sriastrad
/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;	/* on-stack DRBG, per-read */
	uint8_t *buf;			/* RANDOM_BUFSIZE staging buffer */
	int extractflags;
	bool interruptible;
	int error;

	/* Get a buffer for transfers.  */
	buf = pool_cache_get(random_buf_pc, PR_WAITOK);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
		NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/*
	 * Generate data.  Assume no error until failure.  No
	 * interruption at this point until we've generated at least
	 * one block of output.
	 */
	error = 0;
	interruptible = false;
	while (uio->uio_resid) {
		size_t n = uio->uio_resid;

		/* No more than one buffer's worth.  */
		n = MIN(n, RANDOM_BUFSIZE);

		/*
		 * If we're `depleting' and this is /dev/random, clamp
		 * to the smaller of the entropy capacity or the seed.
		 */
		if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
		    minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/*
		 * Allow interruption, but only after providing a
		 * minimum number of bytes.
		 */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    interruptible && sigispending(curlwp, 0)) {
			error = EINTR; /* XXX ERESTART? */
			break;
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
				NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If we're `depleting' and this is /dev/random, stop
		 * here, return what we have, and force the next read
		 * to reseed.  Could grab more from the pool if
		 * possible without blocking, but that's more
		 * work.
		 */
		if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
		    minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/*
		 * We have generated one block of output, so it is
		 * reasonable to allow interruption after this point.
		 */
		interruptible = true;
	}

out:	/* Zero the buffer and return it to the pool cache.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	pool_cache_put(random_buf_pc, buf);

	return error;
}
3701.1Sriastrad
3711.1Sriastrad/*
3721.1Sriastrad * random_write(dev, uio, flags)
3731.1Sriastrad *
3741.1Sriastrad *	Enter data from uio into the entropy pool.
3751.1Sriastrad *
3761.1Sriastrad *	Assume privileged users provide full entropy, and unprivileged
3771.1Sriastrad *	users provide no entropy.  If you have a nonuniform source of
3781.1Sriastrad *	data with n bytes of min-entropy, hash it with an XOF like
3791.1Sriastrad *	SHAKE128 into exactly n bytes first.
3801.1Sriastrad */
3811.1Sriastradstatic int
3821.1Sriastradrandom_write(dev_t dev, struct uio *uio, int flags)
3831.1Sriastrad{
3841.1Sriastrad	kauth_cred_t cred = kauth_cred_get();
3851.1Sriastrad	uint8_t *buf;
3861.1Sriastrad	bool privileged = false;
3871.1Sriastrad	int error = 0;
3881.1Sriastrad
3891.1Sriastrad	/* Verify user's authorization to affect the entropy pool.  */
3901.1Sriastrad	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
3911.1Sriastrad	    NULL, NULL, NULL, NULL);
3921.1Sriastrad	if (error)
3931.1Sriastrad		return error;
3941.1Sriastrad
3951.1Sriastrad	/*
3961.1Sriastrad	 * Check whether user is privileged.  If so, assume user
3971.1Sriastrad	 * furnishes full-entropy data; if not, accept user's data but
3981.1Sriastrad	 * assume it has zero entropy when we do accounting.  If you
3991.1Sriastrad	 * want to specify less entropy, use ioctl(RNDADDDATA).
4001.1Sriastrad	 */
4011.1Sriastrad	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
4021.1Sriastrad		NULL, NULL, NULL, NULL) == 0)
4031.1Sriastrad		privileged = true;
4041.1Sriastrad
4051.1Sriastrad	/* Get a buffer for transfers.  */
4061.1Sriastrad	buf = pool_cache_get(random_buf_pc, PR_WAITOK);
4071.1Sriastrad
4081.1Sriastrad	/* Consume data.  */
4091.1Sriastrad	while (uio->uio_resid) {
4101.1Sriastrad		size_t n = uio->uio_resid;
4111.1Sriastrad
4121.1Sriastrad		/* No more than one buffer's worth in one step.  */
4131.1Sriastrad		n = MIN(uio->uio_resid, RANDOM_BUFSIZE);
4141.1Sriastrad
4151.1Sriastrad		/* Yield if requested.  */
4161.1Sriastrad		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
4171.1Sriastrad			preempt();
4181.1Sriastrad
4191.1Sriastrad		/* Check for interruption.  */
4201.1Sriastrad		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
4211.1Sriastrad		    sigispending(curlwp, 0)) {
4221.1Sriastrad			error = EINTR; /* XXX ERESTART?  */
4231.1Sriastrad			break;
4241.1Sriastrad		}
4251.1Sriastrad
4261.1Sriastrad		/* Transfer n bytes in and enter them into the pool.  */
4271.1Sriastrad		error = uiomove(buf, n, uio);
4281.1Sriastrad		if (error)
4291.1Sriastrad			break;
4301.1Sriastrad		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
4311.1Sriastrad	}
4321.1Sriastrad
4331.1Sriastrad	/* Zero the buffer and return it to the pool cache.  */
4341.1Sriastrad	explicit_memset(buf, 0, RANDOM_BUFSIZE);
4351.1Sriastrad	pool_cache_put(random_buf_pc, buf);
4361.1Sriastrad	return error;
4371.1Sriastrad}
438