/*	$OpenBSD: rnd.c,v 1.225 2022/11/03 04:56:47 guenther Exp $	*/

/*
 * Copyright (c) 2011,2020 Theo de Raadt.
 * Copyright (c) 2008 Damien Miller.
 * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
 * Copyright (c) 2013 Markus Friedl.
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The bootblocks pre-fill the kernel .openbsd.randomdata section with seed
 * material (on-disk from previous boot, hopefully mixed with a hardware rng).
 * The first arc4random(9) call initializes this seed material as a chacha
 * state.  Calls can be done early in kernel bootstrap code -- early use is
 * encouraged.
 *
 * After the kernel timeout subsystem is initialized, random_start() prepares
 * the entropy collection mechanism enqueue_randomness() and timeout-driven
 * mixing into the chacha state.  The first submissions come from device
 * probes, later on interrupt-time submissions are more common.  Entropy
 * data (and timing information) get mixed over the entropy input ring
 * rnd_event_space[] -- the goal is to collect damage.
 *
 * Based upon timeouts, a selection of the entropy ring rnd_event_space[]
 * is CRC bit-distributed and XOR mixed into entropy_pool[].
 *
 * From time to time, entropy_pool[] is SHA512-whitened, mixed with time
 * information again, XOR'd with the inner and outer states of the existing
 * chacha state, to create a new chacha state.
 *
 * During early boot (until cold=0), enqueue operations are immediately
 * dequeued, and mixed into the chacha.
 */
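
/*
 * Example (illustrative only, not part of this file): because the
 * bootblocks pre-seed .openbsd.randomdata, very early kernel code may
 * already do
 *
 *	u_int32_t x = arc4random();
 *
 * without waiting for random_start() or any entropy collection.
 */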

#include <sys/param.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/atomic.h>
#include <sys/task.h>
#include <sys/msgbuf.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <crypto/sha2.h>

#define KEYSTREAM_ONLY
#include <crypto/chacha_private.h>

#include <uvm/uvm_extern.h>

/*
 * For the purposes of better mixing, we use the CRC-32 polynomial as
 * well to make a twisted Generalized Feedback Shift Register
 *
 * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
 * Transactions on Modeling and Computer Simulation 2(3):179-194.
 * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
 * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
 */

/*
 * Stirring polynomial over GF(2). Used in add_entropy_words() below.
 *
 * The polynomial terms are chosen to be evenly spaced (minimum RMS
 * distance from evenly spaced; except for the last tap, which is 1 to
 * get the twisting happening as fast as possible).
 *
 * The resultant polynomial is:
 *   x^POOLWORDS + x^POOL_TAP1 + x^POOL_TAP2 + x^POOL_TAP3 + x^POOL_TAP4 + 1
 */
#define POOLWORDS	2048
#define POOLBYTES	(POOLWORDS*4)
#define POOLMASK	(POOLWORDS - 1)
#define	POOL_TAP1	1638
#define	POOL_TAP2	1231
#define	POOL_TAP3	819
#define	POOL_TAP4	411

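/*
 * For POOLWORDS == 2048, evenly spaced interior taps would fall near
 * 2048*4/5 = 1638.4, 2048*3/5 = 1228.8, 2048*2/5 = 819.2 and
 * 2048/5 = 409.6; the taps above (1638, 1231, 819, 411) sit close to
 * those points, while the final "+ 1" term twists fresh input into
 * neighboring words as quickly as possible.
 */
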
/*
 * Raw entropy collection from device drivers; at interrupt context or not.
 * enqueue_randomness() is used to submit data into the entropy input ring.
 */

#define QEVLEN	128		 /* must be a power of 2 */
#define QEVCONSUME 8		 /* how many events to consume at a time */

#define KEYSZ	32
#define IVSZ	8
#define BLOCKSZ	64
#define RSBUFSZ	(16*BLOCKSZ)
#define EBUFSIZE (KEYSZ + IVSZ)

struct rand_event {
	u_int	re_time;
	u_int	re_val;
} rnd_event_space[QEVLEN];

u_int	rnd_event_cons;
u_int	rnd_event_prod;
int	rnd_cold = 1;
int	rnd_slowextract = 1;

void	rnd_reinit(void *v);		/* timeout to start reinit */
void	rnd_init(void *);		/* actually do the reinit */

static u_int32_t entropy_pool[POOLWORDS];
u_int32_t entropy_pool0[POOLWORDS] __attribute__((section(".openbsd.randomdata")));

void	dequeue_randomness(void *);
void	add_entropy_words(const u_int32_t *, u_int);
void	extract_entropy(u_int8_t *)
    __attribute__((__bounded__(__minbytes__,1,EBUFSIZE)));

struct timeout rnd_timeout = TIMEOUT_INITIALIZER(dequeue_randomness, NULL);

int	filt_randomread(struct knote *, long);
void	filt_randomdetach(struct knote *);
int	filt_randomwrite(struct knote *, long);

static void _rs_seed(u_char *, size_t);
static void _rs_clearseed(const void *p, size_t s);

const struct filterops randomread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomread,
};

const struct filterops randomwrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomwrite,
};

/*
 * This function mixes entropy and timing into the entropy input ring.
 */
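/*
 * A typical (hypothetical) caller is a driver interrupt handler
 * feeding an unpredictable register or counter value, e.g.
 *
 *	void
 *	xxintr(void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		enqueue_randomness(sc->sc_isr_status);
 *		...
 *	}
 *
 * The submitted value is paired with cpu_rnd_messybits() timing below.
 */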
void
enqueue_randomness(u_int val)
{
	struct rand_event *rep;
	int e;

	e = (atomic_inc_int_nv(&rnd_event_prod) - 1) & (QEVLEN-1);
	rep = &rnd_event_space[e];
	rep->re_time += cpu_rnd_messybits();
	rep->re_val += val;

	if (rnd_cold) {
		dequeue_randomness(NULL);
		rnd_init(NULL);
		if (!cold)
			rnd_cold = 0;
	} else if (!timeout_pending(&rnd_timeout) &&
	    (rnd_event_prod - rnd_event_cons) > QEVCONSUME) {
		rnd_slowextract = min(rnd_slowextract * 2, 5000);
		timeout_add_msec(&rnd_timeout, rnd_slowextract * 10);
	}
}

/*
 * This function merges words from the buffer into the entropy pool,
 * using a polynomial to spread the bits.
 */
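/*
 * Worked example of the twist step at the bottom of the loop below:
 * for w == 0x0000000b, w & 7 == 3 and w >> 3 == 1, so the stored word
 * is 1 ^ twist_table[3] == 0x4db26159; the three low bits are folded
 * back in via CRC-32 remainders rather than shifted out.
 */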
void
add_entropy_words(const u_int32_t *buf, u_int n)
{
	/* derived from IEEE 802.3 CRC-32 */
	static const u_int32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
	};
	static u_int	entropy_add_ptr;
	static u_char	entropy_input_rotate;

	for (; n--; buf++) {
		u_int32_t w = (*buf << entropy_input_rotate) |
		    (*buf >> ((32 - entropy_input_rotate) & 31));
		u_int i = entropy_add_ptr =
		    (entropy_add_ptr - 1) & POOLMASK;
		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		entropy_input_rotate =
		    (entropy_input_rotate + (i ? 7 : 14)) & 31;

		/* XOR pool contents corresponding to polynomial terms */
		w ^= entropy_pool[(i + POOL_TAP1) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP2) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP3) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP4) & POOLMASK] ^
		     entropy_pool[(i + 1) & POOLMASK] ^
		     entropy_pool[i]; /* + x^POOLWORDS */

		entropy_pool[i] = (w >> 3) ^ twist_table[w & 7];
	}
}

/*
 * Pulls entropy out of the queue and merges it into the pool with the
 * CRC.  This takes a mix of fresh entries from the producer end of the
 * queue and entries from the consumer end of the queue which are
 * likely to have collected more damage.
 */
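/*
 * Index arithmetic wraps modulo QEVLEN: with rnd_event_prod == 130
 * and QEVCONSUME == 8, startp == 122 and the fresh pass reads slots
 * 122..127, then 0 and 1, because each index is masked with
 * (QEVLEN-1).
 */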
/* ARGSUSED */
void
dequeue_randomness(void *v)
{
	u_int32_t buf[2];
	u_int startp, startc, i;

	if (!rnd_cold)
		timeout_del(&rnd_timeout);

	/* Some very new damage */
	startp = rnd_event_prod - QEVCONSUME;
	for (i = 0; i < QEVCONSUME; i++) {
		u_int e = (startp + i) & (QEVLEN-1);

		buf[0] = rnd_event_space[e].re_time;
		buf[1] = rnd_event_space[e].re_val;
		add_entropy_words(buf, 2);
	}
	/* and some probably more damaged */
	startc = rnd_event_cons;
	for (i = 0; i < QEVCONSUME; i++) {
		u_int e = (startc + i) & (QEVLEN-1);

		buf[0] = rnd_event_space[e].re_time;
		buf[1] = rnd_event_space[e].re_val;
		add_entropy_words(buf, 2);
	}
	rnd_event_cons = startp + QEVCONSUME;
}

/*
 * Grabs a chunk from the entropy_pool[] and slams it through SHA512 when
 * requested.
 */
void
extract_entropy(u_int8_t *buf)
{
	static u_int32_t extract_pool[POOLWORDS];
	u_char digest[SHA512_DIGEST_LENGTH];
	SHA2_CTX shactx;

#if SHA512_DIGEST_LENGTH < EBUFSIZE
#error "need more bigger hash output"
#endif

	/*
	 * INTENTIONALLY not protected by any lock.  Races during
	 * memcpy() result in acceptable input data; races during
	 * SHA512Update() would create nasty data dependencies.  We
	 * do not rely on this as a benefit, but if it happens, cool.
	 */
	memcpy(extract_pool, entropy_pool, sizeof(extract_pool));

	/* Hash the pool to get the output */
	SHA512Init(&shactx);
	SHA512Update(&shactx, (u_int8_t *)extract_pool, sizeof(extract_pool));
	SHA512Final(digest, &shactx);

	/* Copy data to destination buffer */
	memcpy(buf, digest, EBUFSIZE);

	/*
	 * Modify pool so next hash will produce different results.
	 * During boot-time enqueue/dequeue stage, avoid recursion.
	 */
	if (!rnd_cold)
		enqueue_randomness(extract_pool[0]);
	dequeue_randomness(NULL);

	/* Wipe data from memory */
	explicit_bzero(extract_pool, sizeof(extract_pool));
	explicit_bzero(digest, sizeof(digest));
}

/* random keystream by ChaCha */

struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);
struct timeout rndreinit_timeout = TIMEOUT_INITIALIZER(rnd_reinit, NULL);
struct task rnd_task = TASK_INITIALIZER(rnd_init, NULL);

static chacha_ctx rs;		/* chacha context for random keystream */
/* keystream blocks (also chacha seed from boot) */
static u_char rs_buf[RSBUFSZ];
u_char rs_buf0[RSBUFSZ] __attribute__((section(".openbsd.randomdata")));
static size_t rs_have;		/* valid bytes at end of rs_buf */
static size_t rs_count;		/* bytes till reseed */

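/*
 * Mix the suspend time into the entropy ring, force a reseed on next
 * use, and overwrite the pool with keystream so its previous contents
 * do not linger in a suspend/hibernate image.
 */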
void
suspend_randomness(void)
{
	struct timespec ts;

	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
	arc4random_buf(entropy_pool, sizeof(entropy_pool));
}

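/*
 * On resume, reseed from any seed the resume path provides, mix the
 * current time back in, and force a reseed on next use.
 */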
void
resume_randomness(char *buf, size_t buflen)
{
	struct timespec ts;

	if (buf && buflen)
		_rs_seed(buf, buflen);
	getnanotime(&ts);
	enqueue_randomness(ts.tv_sec);
	enqueue_randomness(ts.tv_nsec);

	dequeue_randomness(NULL);
	rs_count = 0;
}

static inline void _rs_rekey(u_char *dat, size_t datlen);

static inline void
_rs_init(u_char *buf, size_t n)
{
	KASSERT(n >= KEYSZ + IVSZ);
	chacha_keysetup(&rs, buf, KEYSZ * 8);
	chacha_ivsetup(&rs, buf + KEYSZ, NULL);
}

static void
_rs_seed(u_char *buf, size_t n)
{
	_rs_rekey(buf, n);

	/* invalidate rs_buf */
	rs_have = 0;
	memset(rs_buf, 0, sizeof(rs_buf));

	rs_count = 1600000;
}

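/*
 * After a reseed, about rs_count == 1600000 bytes of keystream are
 * served before _rs_stir_if_needed() forces the next stir, i.e. a
 * busy consumer pulls fresh pool entropy roughly every 1.6 MB.
 */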
static void
_rs_stir(int do_lock)
{
	struct timespec ts;
	u_int8_t buf[EBUFSIZE], *p;
	int i;

	/*
	 * Use SHA512 PRNG data and a system timespec; early in the boot
	 * process this is the best we can do -- some architectures do
	 * not collect entropy very well during this time, but may have
	 * clock information which is better than nothing.
	 */
	extract_entropy(buf);

	nanotime(&ts);
	for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
		buf[i] ^= p[i];

	if (do_lock)
		mtx_enter(&rndlock);
	_rs_seed(buf, sizeof(buf));
	if (do_lock)
		mtx_leave(&rndlock);
	explicit_bzero(buf, sizeof(buf));

	/* encourage fast-dequeue again */
	rnd_slowextract = 1;
}

static inline void
_rs_stir_if_needed(size_t len)
{
	static int rs_initialized;

	if (!rs_initialized) {
		memcpy(entropy_pool, entropy_pool0, sizeof(entropy_pool));
		memcpy(rs_buf, rs_buf0, sizeof(rs_buf));
		/* seeds cannot be cleaned yet, random_start() will do so */
		_rs_init(rs_buf, KEYSZ + IVSZ);
		rs_count = 1024 * 1024 * 1024;	/* until main() runs */
		rs_initialized = 1;
	} else if (rs_count <= len)
		_rs_stir(0);
	else
		rs_count -= len;
}

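/*
 * Zero the boot seed in the kernel image.  The .openbsd.randomdata
 * section may be mapped read-only, so map the backing physical pages
 * at a temporary writable address and wipe through that alias.
 */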
static void
_rs_clearseed(const void *p, size_t s)
{
	struct kmem_dyn_mode kd_avoidalias;
	vaddr_t va = trunc_page((vaddr_t)p);
	vsize_t off = (vaddr_t)p - va;
	vsize_t len;
	vaddr_t rwva;
	paddr_t pa;

	while (s > 0) {
		pmap_extract(pmap_kernel(), va, &pa);

		memset(&kd_avoidalias, 0, sizeof(kd_avoidalias));
		kd_avoidalias.kd_prefer = pa;
		kd_avoidalias.kd_waitok = 1;
		rwva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
		    &kd_avoidalias);
		if (!rwva)
			panic("_rs_clearseed");

		pmap_kenter_pa(rwva, pa, PROT_READ | PROT_WRITE);
		pmap_update(pmap_kernel());

		len = MIN(s, PAGE_SIZE - off);
		explicit_bzero((void *)(rwva + off), len);

		pmap_kremove(rwva, PAGE_SIZE);
		km_free((void *)rwva, PAGE_SIZE, &kv_any, &kp_none);

		va += PAGE_SIZE;
		s -= len;
		off = 0;
	}
}

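/*
 * Refill rs_buf with keystream, then immediately re-key the cipher
 * from the first KEYSZ + IVSZ bytes and erase them: with RSBUFSZ ==
 * 16*64 == 1024, each rekey leaves 1024 - 40 == 984 bytes for
 * consumers, and the erased key provides backtracking resistance.
 */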
static inline void
_rs_rekey(u_char *dat, size_t datlen)
{
#ifndef KEYSTREAM_ONLY
	memset(rs_buf, 0, sizeof(rs_buf));
#endif
	/* fill rs_buf with the keystream */
	chacha_encrypt_bytes(&rs, rs_buf, rs_buf, sizeof(rs_buf));
	/* mix in optional user provided data */
	if (dat) {
		size_t i, m;

		m = MIN(datlen, KEYSZ + IVSZ);
		for (i = 0; i < m; i++)
			rs_buf[i] ^= dat[i];
	}
	/* immediately reinit for backtracking resistance */
	_rs_init(rs_buf, KEYSZ + IVSZ);
	memset(rs_buf, 0, KEYSZ + IVSZ);
	rs_have = sizeof(rs_buf) - KEYSZ - IVSZ;
}

static inline void
_rs_random_buf(void *_buf, size_t n)
{
	u_char *buf = (u_char *)_buf;
	size_t m;

	_rs_stir_if_needed(n);
	while (n > 0) {
		if (rs_have > 0) {
			m = MIN(n, rs_have);
			memcpy(buf, rs_buf + sizeof(rs_buf) - rs_have, m);
			memset(rs_buf + sizeof(rs_buf) - rs_have, 0, m);
			buf += m;
			n -= m;
			rs_have -= m;
		}
		if (rs_have == 0)
			_rs_rekey(NULL, 0);
	}
}

static inline void
_rs_random_u32(u_int32_t *val)
{
	_rs_stir_if_needed(sizeof(*val));
	if (rs_have < sizeof(*val))
		_rs_rekey(NULL, 0);
	memcpy(val, rs_buf + sizeof(rs_buf) - rs_have, sizeof(*val));
	memset(rs_buf + sizeof(rs_buf) - rs_have, 0, sizeof(*val));
	rs_have -= sizeof(*val);
}

/* Return one word of randomness from a ChaCha20 generator */
u_int32_t
arc4random(void)
{
	u_int32_t ret;

	mtx_enter(&rndlock);
	_rs_random_u32(&ret);
	mtx_leave(&rndlock);
	return ret;
}

/*
 * Fill a buffer of arbitrary length with ChaCha20-derived randomness.
 */
void
arc4random_buf(void *buf, size_t n)
{
	mtx_enter(&rndlock);
	_rs_random_buf(buf, n);
	mtx_leave(&rndlock);
}

/*
 * Allocate a new ChaCha20 context for the caller to use.
 */
struct arc4random_ctx *
arc4random_ctx_new(void)
{
	char keybuf[KEYSZ + IVSZ];

	chacha_ctx *ctx = malloc(sizeof(chacha_ctx), M_TEMP, M_WAITOK);
	arc4random_buf(keybuf, KEYSZ + IVSZ);
	chacha_keysetup(ctx, keybuf, KEYSZ * 8);
	chacha_ivsetup(ctx, keybuf + KEYSZ, NULL);
	explicit_bzero(keybuf, sizeof(keybuf));
	return (struct arc4random_ctx *)ctx;
}

/*
 * Free a ChaCha20 context created by arc4random_ctx_new()
 */
void
arc4random_ctx_free(struct arc4random_ctx *ctx)
{
	explicit_bzero(ctx, sizeof(chacha_ctx));
	free(ctx, M_TEMP, sizeof(chacha_ctx));
}

/*
 * Use a given ChaCha20 context to fill a buffer
 */
void
arc4random_ctx_buf(struct arc4random_ctx *ctx, void *buf, size_t n)
{
#ifndef KEYSTREAM_ONLY
	memset(buf, 0, n);
#endif
	chacha_encrypt_bytes((chacha_ctx *)ctx, buf, buf, n);
}
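
/*
 * Typical lifecycle of a discrete context, as used by randomread()
 * below for large requests:
 *
 *	struct arc4random_ctx *ctx = arc4random_ctx_new();
 *
 *	arc4random_ctx_buf(ctx, buf, len);
 *	arc4random_ctx_free(ctx);
 */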

/*
 * Calculate a uniformly distributed random number less than upper_bound
 * avoiding "modulo bias".
 *
 * Uniformity is achieved by generating new random numbers until the one
 * returned is outside the range [0, 2**32 % upper_bound).  This
 * guarantees the selected random number will be inside
 * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
 * after reduction modulo upper_bound.
 */
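/*
 * Worked example: for upper_bound == 3, min == 2**32 % 3 == 1, so
 * only r == 0 is rejected; the 2**32 - 1 accepted values split into
 * exactly (2**32 - 1) / 3 values per residue, so r % 3 is uniform.
 */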
u_int32_t
arc4random_uniform(u_int32_t upper_bound)
{
	u_int32_t r, min;

	if (upper_bound < 2)
		return 0;

	/* 2**32 % x == (2**32 - x) % x */
	min = -upper_bound % upper_bound;

	/*
	 * This could theoretically loop forever but each retry has
	 * p > 0.5 (worst case, usually far better) of selecting a
	 * number inside the range we need, so it should rarely need
	 * to re-roll.
	 */
	for (;;) {
		r = arc4random();
		if (r >= min)
			break;
	}

	return r % upper_bound;
}

/* ARGSUSED */
void
rnd_init(void *null)
{
	_rs_stir(1);
}

/*
 * Called by timeout to mark arc4 for stirring.
 */
void
rnd_reinit(void *v)
{
	task_add(systq, &rnd_task);
	/* 10 minutes, per dm@'s suggestion */
	timeout_add_sec(&rndreinit_timeout, 10 * 60);
}

/*
 * Start periodic services inside the random subsystem, which pull
 * entropy forward, hash it, and re-seed the random stream as needed.
 */
void
random_start(int goodseed)
{
	extern char etext[];

#if !defined(NO_PROPOLICE)
	extern long __guard_local;

	if (__guard_local == 0)
		printf("warning: no entropy supplied by boot loader\n");
#endif

	_rs_clearseed(entropy_pool0, sizeof(entropy_pool0));
	_rs_clearseed(rs_buf0, sizeof(rs_buf0));

	/* Message buffer may contain data from previous boot */
	if (msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
	add_entropy_words((u_int32_t *)etext - 32*1024,
	    8192/sizeof(u_int32_t));

	dequeue_randomness(NULL);
	rnd_init(NULL);
	rnd_reinit(NULL);

	if (goodseed)
		printf("random: good seed from bootblocks\n");
	else {
		/* XXX kernel should work harder here */
		printf("random: boothowto does not indicate good seed\n");
	}
}

int
randomopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}

int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}

/*
 * Maximum number of bytes to serve directly from the main ChaCha
 * pool. Larger requests are served from a discrete ChaCha instance keyed
 * from the main pool.
 */
#define RND_MAIN_MAX_BYTES	2048

int
randomread(dev_t dev, struct uio *uio, int ioflag)
{
	struct arc4random_ctx *lctx = NULL;
	size_t		total = uio->uio_resid;
	u_char		*buf;
	int		ret = 0;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
	if (total > RND_MAIN_MAX_BYTES)
		lctx = arc4random_ctx_new();

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		if (lctx != NULL)
			arc4random_ctx_buf(lctx, buf, n);
		else
			arc4random_buf(buf, n);
		ret = uiomove(buf, n, uio);
		if (ret == 0 && uio->uio_resid > 0)
			yield();
	}
	if (lctx != NULL)
		arc4random_ctx_free(lctx);
	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}

int
randomwrite(dev_t dev, struct uio *uio, int flags)
{
	int		ret = 0, newdata = 0;
	u_int32_t	*buf;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (ret == 0 && uio->uio_resid > 0) {
		size_t	n = ulmin(POOLBYTES, uio->uio_resid);

		ret = uiomove(buf, n, uio);
		if (ret != 0)
			break;
		while (n % sizeof(u_int32_t))
			((u_int8_t *)buf)[n++] = 0;
		add_entropy_words(buf, n / 4);
		if (uio->uio_resid > 0)
			yield();
		newdata = 1;
	}

	if (newdata)
		rnd_init(NULL);

	explicit_bzero(buf, POOLBYTES);
	free(buf, M_TEMP, POOLBYTES);
	return ret;
}

int
randomkqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &randomread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &randomwrite_filtops;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

void
filt_randomdetach(struct knote *kn)
{
}

int
filt_randomread(struct knote *kn, long hint)
{
	kn->kn_data = RND_MAIN_MAX_BYTES;
	return (1);
}

int
filt_randomwrite(struct knote *kn, long hint)
{
	kn->kn_data = POOLBYTES;
	return (1);
}

int
randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	switch (cmd) {
	case FIOASYNC:
		/* No async flag in softc so this is a no-op. */
		break;
	case FIONBIO:
		/* Handled in the upper FS layer. */
		break;
	default:
		return ENOTTY;
	}
	return 0;
}

int
sys_getentropy(struct proc *p, void *v, register_t *retval)
{
	struct sys_getentropy_args /* {
		syscallarg(void *) buf;
		syscallarg(size_t) nbyte;
	} */ *uap = v;
	char buf[256];
	int error;

	if (SCARG(uap, nbyte) > sizeof(buf))
		return (EIO);
	arc4random_buf(buf, SCARG(uap, nbyte));
	if ((error = copyout(buf, SCARG(uap, buf), SCARG(uap, nbyte))) != 0)
		return (error);
	explicit_bzero(buf, sizeof(buf));
	*retval = 0;
	return (0);
}
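
/*
 * The matching userland call (illustrative only):
 *
 *	char ebuf[32];
 *
 *	if (getentropy(ebuf, sizeof(ebuf)) == -1)
 *		err(1, "getentropy");
 *
 * Requests larger than 256 bytes fail with EIO, per the check above.
 */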