/*	$NetBSD: random.c,v 1.7 2020/05/08 16:05:36 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 *	For short reads from /dev/urandom, up to RANDOM_BUFSIZE (512)
 *	bytes, read from a per-CPU NIST Hash_DRBG instance that is
 *	reseeded as soon as the system has enough entropy.
 *
 *	For all other reads, instantiate a fresh NIST Hash_DRBG from
 *	the global entropy pool, and draw from it.
 *
 *	Each read is independent; there is no per-open state.
 *	Concurrent reads from the same open run in parallel.
 *
 *	Reading from /dev/random may block until entropy is available.
 *	Either device may return short reads if interrupted.
 */
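
/*
 * Example: what a userland caller of these devices has to handle.  A
 * minimal sketch, not part of the driver: it retries on EINTR and
 * tolerates short reads, which per the comment above is all the
 * interface requires.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int
 *	fillrandom(void *vbuf, size_t len)
 *	{
 *		char *p = vbuf;
 *		int fd = open("/dev/urandom", O_RDONLY);
 *		ssize_t n;
 *
 *		if (fd == -1)
 *			return -1;
 *		while (len > 0) {
 *			n = read(fd, p, len);
 *			if (n == -1 && errno == EINTR)
 *				continue;
 *			if (n == -1) {
 *				close(fd);
 *				return -1;
 *			}
 *			p += n;
 *			len -= n;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */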

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.7 2020/05/08 16:05:36 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX arbitrary buffer size */

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t	user_rndsource;

void
rndattach(int num)
{

	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}

static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}
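
/*
 * Example: waiting for /dev/random to become readable from userland
 * with poll(2), matching the semantics above.  Illustrative sketch
 * only; fd is assumed to be an open descriptor for /dev/random and
 * buf a local buffer.
 *
 *	#include <errno.h>
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, INFTIM) == -1 && errno == EINTR)
 *		continue;
 *	if (pfd.revents & POLLIN)
 *		read(fd, buf, sizeof buf);
 */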

static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}
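
/*
 * Example: the same wait expressed with kqueue(2)/kevent(2), which
 * ends up in random_kqfilter above.  Illustrative userland sketch;
 * fd is assumed to be an open descriptor for /dev/random and buf a
 * local buffer.
 *
 *	#include <sys/event.h>
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	if (kevent(kq, &ev, 1, &ev, 1, NULL) == 1)
 *		read(fd, buf, sizeof buf);
 */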

/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0};
	struct nist_hash_drbg drbg;
	uint8_t *buf;
	int extractflags;
	int error;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/*
	 * If it's a short read from /dev/urandom, just generate the
	 * output directly with per-CPU cprng_strong.
	 */
	if (minor(dev) == RND_DEV_URANDOM &&
	    uio->uio_resid <= RANDOM_BUFSIZE) {
		/* Generate data and transfer it out.  */
		cprng_strong(user_cprng, buf, uio->uio_resid, 0);
		error = uiomove(buf, uio->uio_resid, uio);
		goto out;
	}

	/*
	 * If we're doing a blocking read from /dev/random, wait
	 * interruptibly.  Otherwise, don't wait.
	 */
	if (minor(dev) == RND_DEV_RANDOM && !ISSET(flags, FNONBLOCK))
		extractflags = ENTROPY_WAIT|ENTROPY_SIG;
	else
		extractflags = 0;

	/*
	 * Query the entropy pool.  For /dev/random, stop here if this
	 * fails.  For /dev/urandom, go on either way --
	 * entropy_extract will always fill the buffer with what we
	 * have from the global pool.
	 */
	error = entropy_extract(seed, sizeof seed, extractflags);
	if (minor(dev) == RND_DEV_RANDOM && error)
		goto out;

	/* Instantiate the DRBG.  */
	if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0,
		NULL, 0))
		panic("nist_hash_drbg_instantiate");

	/* Promptly zero the seed.  */
	explicit_memset(seed, 0, sizeof seed);

	/* Generate data.  */
	error = 0;
	while (uio->uio_resid) {
		size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/*
		 * Clamp /dev/random output to the entropy capacity and
		 * seed size.  Programs can't rely on long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			n = MIN(n, ENTROPY_CAPACITY);
			n = MIN(n, sizeof seed);
			/*
			 * Guarantee never to return more than one
			 * buffer in this case to minimize bookkeeping.
			 */
			CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE);
			CTASSERT(sizeof seed <= RANDOM_BUFSIZE);
		}

		/*
		 * Try to generate a block of data, but if we've hit
		 * the DRBG reseed interval, reseed.
		 */
		if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) {
			/*
			 * Get a fresh seed without blocking -- we have
			 * already generated some output so it is not
			 * useful to block.  This can fail only if the
			 * request is obscenely large, so it is OK for
			 * either /dev/random or /dev/urandom to fail:
			 * we make no promises about gigabyte-sized
			 * reads happening all at once.
			 */
			error = entropy_extract(seed, sizeof seed, 0);
			if (error)
				break;

			/* Reseed and try again.  */
			if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed,
				NULL, 0))
				panic("nist_hash_drbg_reseed");

			/* Promptly zero the seed.  */
			explicit_memset(seed, 0, sizeof seed);

			/* If it fails now, that's a bug.  */
			if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0))
				panic("nist_hash_drbg_generate");
		}

		/* Transfer n bytes out.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;

		/*
		 * If this is /dev/random, stop here, return what we
		 * have, and force the next read to reseed.  Programs
		 * can't rely on /dev/random for long reads.
		 */
		if (minor(dev) == RND_DEV_RANDOM) {
			error = 0;
			break;
		}

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption after at least 256 bytes.  */
		CTASSERT(RANDOM_BUFSIZE >= 256);
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR;
			break;
		}
	}

out:	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	return error;
}
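
/*
 * Example: the /dev/random failure modes documented above, as seen
 * from userland.  Illustrative sketch only; err.h's warnx is used
 * just for reporting.
 *
 *	#include <err.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	uint8_t buf[32];
 *	int fd = open("/dev/random", O_RDONLY|O_NONBLOCK);
 *	ssize_t n = read(fd, buf, sizeof buf);
 *
 *	if (n == -1 && errno == EWOULDBLOCK)
 *		warnx("entropy pool not yet full");
 *	else if (n >= 0 && (size_t)n < sizeof buf)
 *		warnx("short read: %zd of %zu bytes", n, sizeof buf);
 */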

/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
		any = true;

		/* Yield if requested.  */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR;
			break;
		}
	}

	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	/* If we added anything, consolidate entropy now.  */
	if (any)
		entropy_consolidate();

	return error;
}
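
/*
 * Example: conditioning a nonuniform sample before feeding it to
 * random_write, as the comment above suggests.  A sketch using
 * OpenSSL's EVP interface for SHAKE128 (an assumption; any XOF
 * implementation will do): a raw sample believed to carry n bytes
 * of min-entropy is compressed to exactly n output bytes, which a
 * privileged writer may then write to /dev/random to be credited
 * n*NBBY bits.
 *
 *	#include <openssl/evp.h>
 *
 *	static int
 *	condition(const void *raw, size_t rawlen, void *out, size_t n)
 *	{
 *		EVP_MD_CTX *ctx = EVP_MD_CTX_new();
 *		int ok = ctx != NULL &&
 *		    EVP_DigestInit_ex(ctx, EVP_shake128(), NULL) &&
 *		    EVP_DigestUpdate(ctx, raw, rawlen) &&
 *		    EVP_DigestFinalXOF(ctx, out, n);
 *
 *		EVP_MD_CTX_free(ctx);
 *		return ok ? 0 : -1;
 *	}
 */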