xref: /dflybsd-src/sys/kern/kern_nrandom.c (revision 225cb38fefe493d6b322d9e7699d9987ec2c56e5)
1 /*
2  * Copyright (c) 2004-2014 The DragonFly Project. All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * by Alex Hornung <alex@alexhornung.com>
7  * by Robin J Carey
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification, immediately at the beginning of the file.
15  * 2. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 /*			   --- NOTES ---
31  *
 * Note: The word "entropy" is often incorrectly used to describe
 * random data; the term originates in physics.  A more accurate
 * description would be something along the lines of "seed",
 * "unpredictable numbers" or "unpredictable data".
37  *
38  * Note: Some /dev/[u]random implementations save "seed" between
 * boots, which is a security hazard since an adversary could
 * acquire this data (it is stored in a file).  If
41  * the unpredictable data used in the above routines is only
42  * generated during Kernel operation, then an adversary can only
43  * acquire that data through a Kernel security compromise and/or
44  * a cryptographic algorithm failure/cryptanalysis.
45  *
46  * Note: On FreeBSD-4.11, interrupts have to be manually enabled
47  * using the rndcontrol(8) command.
48  *
49  *		--- DESIGN (FreeBSD-4.11 based) ---
50  *
51  *   The rnddev module automatically initializes itself the first time
52  * it is used (client calls any public rnddev_*() interface routine).
53  * Both CSPRNGs are initially seeded from the precise nano[up]time() routines.
 * Tests show this method produces good enough results, suitable for the
 * intended use.  It is necessary for both CSPRNGs to be completely seeded
 * initially.
56  *
57  *   After initialization and during Kernel operation the only suitable
58  * unpredictable data available is:
59  *
60  *	(1) Keyboard scan-codes.
61  *	(2) Nanouptime acquired by a Keyboard/Read-Event.
62  *	(3) Suitable interrupt source; hard-disk/ATA-device.
63  *
64  *      (X) Mouse-event (xyz-data unsuitable); NOT IMPLEMENTED.
65  *
 *   This data is added to both CSPRNGs in real time as it becomes
 * available.  Additionally, (supposedly) unpredictable data may be
 * acquired from a true-random number generator if such a device is
 * available to the system (not advisable!).
 *   Nanouptime() acquired by a Read-Event is a very important aspect of
 * this design, since it ensures that unpredictable data is added to
 * the CSPRNGs even if there are no other sources.
 *   The nanouptime() Kernel routine is used since time relative to
 * boot is less well known to an adversary than absolute time.
75  *
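 *   For example (an illustrative sketch only; "mydev_intr" is a made-up
 * interrupt handler), a driver with a suitable timing source could feed
 * the CSPRNGs through the public interface defined later in this file:
 *
 *	static void
 *	mydev_intr(void *arg)
 *	{
 *		struct timespec ts;
 *
 *		nanouptime(&ts);
 *		add_buffer_randomness_src((const char *)&ts.tv_nsec,
 *		    sizeof(ts.tv_nsec), RAND_SRC_TIMING);
 *	}
 *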
76  *   This design has been thoroughly tested with debug logging
77  * and the output from both /dev/random and /dev/urandom has
78  * been tested with the DIEHARD test-suite; both pass.
79  *
80  * MODIFICATIONS MADE TO ORIGINAL "kern_random.c":
81  *
82  * 6th July 2005:
83  *
84  * o Changed ReadSeed() function to schedule future read-seed-events
85  *   by at least one second. Previous implementation used a randomised
86  *   scheduling { 0, 1, 2, 3 seconds }.
87  * o Changed SEED_NANOUP() function to use a "previous" accumulator
88  *   algorithm similar to ReadSeed(). This ensures that there is no
89  *   way that an adversary can tell what number is being added to the
90  *   CSPRNGs, since the number added to the CSPRNGs at Event-Time is
91  *   the sum of nanouptime()@Event and an unknown/secret number.
92  * o Changed rnddev_add_interrupt() function to schedule future
93  *   interrupt-events by at least one second. Previous implementation
 *   had no scheduling algorithm, which allowed an "interrupt storm"
 *   to occur, resulting in skewed data entering the CSPRNGs.
96  *
97  *
98  * 9th July 2005:
99  *
100  * o Some small cleanups and change all internal functions to be
101  *   static/private.
102  * o Removed ReadSeed() since its functionality is already performed
103  *   by another function { rnddev_add_interrupt_OR_read() } and remove
 *   the silly rndByte accumulator/feedback-thing (since multiplying by
105  *   rndByte could yield a value of 0).
 * o Made the IBAA/L14 public interface static/private, i.e. local to
 *   this file (the original C modules themselves are unchanged).
108  *
109  * 16th July 2005:
110  *
111  * o SEED_NANOUP() -> NANOUP_EVENT() function rename.
112  * o Make NANOUP_EVENT() handle the time-buffering directly so that all
113  *   time-stamp-events use this single time-buffer (including keyboard).
 *   This removes the dependency on the "time_second" Kernel variable.
115  * o Removed second-time-buffer code in rnddev_add_interrupt_OR_read (void).
116  * o Rewrote the time-buffering algorithm in NANOUP_EVENT() to use a
117  *   randomised time-delay range.
118  *
119  * 12th Dec 2005:
120  *
121  * o Updated to (hopefully final) L15 algorithm.
122  *
123  * 12th June 2006:
124  *
125  * o Added missing (u_char *) cast in RnddevRead() function.
126  * o Changed copyright to 3-clause BSD license and cleaned up the layout
127  *   of this file.
128  *
129  * For a proper changelog, refer to the version control history of this
130  * file.
131  */
132 
133 #include <sys/types.h>
134 #include <sys/kernel.h>
135 #include <sys/systm.h>
136 #include <sys/poll.h>
137 #include <sys/event.h>
138 #include <sys/random.h>
139 #include <sys/systimer.h>
140 #include <sys/time.h>
141 #include <sys/proc.h>
142 #include <sys/lock.h>
143 #include <sys/sysctl.h>
144 #include <sys/sysproto.h>
145 #include <sys/spinlock.h>
146 #include <sys/csprng.h>
147 #include <machine/atomic.h>
148 #include <machine/clock.h>
149 
150 #include <sys/spinlock2.h>
151 #include <sys/signal2.h>
152 
153 struct csprng_state csprng_state;
154 
155 /*
156  * Portability note: The u_char/unsigned char type is used where
 * uint8_t from <stdint.h> or u_int8_t from <sys/types.h> should really
 * be used.  On FreeBSD it is safe to assume that these types are
 * equivalent on all architectures.
160  * The FreeBSD <sys/crypto/rc4> module also makes this assumption.
161  */
162 
163 /*------------------------------ IBAA ----------------------------------*/
164 
165 /*-------------------------- IBAA CSPRNG -------------------------------*/
166 
167 /*
168  * NOTE: The original source code from which this source code (IBAA)
169  *       was taken has no copyright/license. The algorithm has no patent
170  *       and is freely/publicly available from:
171  *
172  *           http://www.burtleburtle.net/bob/rand/isaac.html
173  */
174 
175 /*
176  * ^ means XOR, & means bitwise AND, a<<b means shift a by b.
 * barrel(a) rotates a 20 bits to the left (bits wrap around)
178  * ind(x) is (x AND 255), or (x mod 256)
179  */
180 typedef	u_int32_t	u4;   /* unsigned four bytes, 32 bits */
181 
182 #define	ALPHA		(8)
183 #define	SIZE		(1 << ALPHA)
184 #define MASK		(SIZE - 1)
185 #define	ind(x)		((x) & (SIZE - 1))
186 #define	barrel(a)	(((a) << 20) ^ ((a) >> 12))  /* beta=32,shift=20 */
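
/*
 * Quick sanity check of the macros above (values worked out by hand):
 *
 *	ind(0x1234)        == 0x34        (low ALPHA = 8 bits)
 *	barrel(0x00000001) == 0x00100000  (32-bit rotate left by 20)
 */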
187 
188 static void IBAA
189 (
190 	u4 *m,		/* Memory: array of SIZE ALPHA-bit terms */
191 	u4 *r,		/* Results: the sequence, same size as m */
192 	u4 *aa,		/* Accumulator: a single value */
193 	u4 *bb,		/* the previous result */
194 	u4 *counter	/* counter */
195 )
196 {
197 	u4 a, b, x, y, i;
198 
199 	a = *aa;
200 	b = *bb + *counter;
201 	++*counter;
202 	for (i = 0; i < SIZE; ++i) {
203 		x = m[i];
204 		a = barrel(a) + m[ind(i + (SIZE / 2))];	/* set a */
205 		m[i] = y = m[ind(x)] + a + b;		/* set m */
206 		r[i] = b = m[ind(y >> ALPHA)] + x;	/* set r */
207 	}
208 	*bb = b; *aa = a;
209 }
210 
211 /*-------------------------- IBAA CSPRNG -------------------------------*/
212 
213 
214 static u4	IBAA_memory[SIZE];
215 static u4	IBAA_results[SIZE];
216 static u4	IBAA_aa;
217 static u4	IBAA_bb;
218 static u4	IBAA_counter;
219 
220 static volatile int IBAA_byte_index;
221 
222 
223 static void	IBAA_Init(void);
224 static void	IBAA_Call(void);
225 static void	IBAA_Seed(const u_int32_t val);
226 static u_char	IBAA_Byte(void);
227 
228 /*
229  * Initialize IBAA.
230  */
231 static void
232 IBAA_Init(void)
233 {
234 	size_t	i;
235 
236 	for (i = 0; i < SIZE; ++i) {
237 		IBAA_memory[i] = i;
238 	}
239 	IBAA_aa = IBAA_bb = 0;
240 	IBAA_counter = 0;
241 	IBAA_byte_index = sizeof(IBAA_results);	/* force IBAA_Call() */
242 }
243 
244 /*
245  * PRIVATE: Call IBAA to produce 256 32-bit u4 results.
246  */
247 static void
248 IBAA_Call (void)
249 {
250 	IBAA(IBAA_memory, IBAA_results, &IBAA_aa, &IBAA_bb, &IBAA_counter);
251 	IBAA_byte_index = 0;
252 }
253 
254 /*
 * Add a 32-bit u4 seed value into IBAA's memory.  Mix the low 4 bits
 * with 4 bits of PRNG data to reduce the possibility of a seeding-based
257  * attack.
258  */
259 static void
260 IBAA_Seed (const u_int32_t val)
261 {
262 	static int memIndex;
263 	u4 *iptr;
264 
265 	iptr = &IBAA_memory[memIndex & MASK];
266 	*iptr = ((*iptr << 3) | (*iptr >> 29)) + (val ^ (IBAA_Byte() & 15));
267 	++memIndex;
268 }
269 
270 static void
271 IBAA_Vector (const char *buf, int bytes)
272 {
273 	int i;
274 
275 	while (bytes >= sizeof(int)) {
276 		IBAA_Seed(*(const int *)buf);
277 		buf += sizeof(int);
278 		bytes -= sizeof(int);
279 	}
280 
281 	/*
282 	 * Warm up the generator to get rid of weak initial states.
283 	 */
284 	for (i = 0; i < 10; ++i)
285 		IBAA_Call();
286 }
287 
288 /*
 * Extract a byte from IBAA's array of 256 32-bit u4 results.
290  *
291  * NOTE: This code is designed to prevent MP races from taking
292  * IBAA_byte_index out of bounds.
293  */
294 static u_char
295 IBAA_Byte(void)
296 {
297 	u_char result;
298 	int index;
299 
300 	index = IBAA_byte_index;
301 	if (index == sizeof(IBAA_results)) {
302 		IBAA_Call();
303 		index = 0;
304 	}
305 	result = ((u_char *)IBAA_results)[index];
306 	IBAA_byte_index = index + 1;
307 	return result;
308 }
309 
310 /*------------------------------ IBAA ----------------------------------*/
311 
312 
313 /*------------------------------- L15 ----------------------------------*/
314 
315 /*
 * IMPORTANT NOTE: LByteType must be exactly 8 bits in size or this software
317  * will not function correctly.
318  */
319 typedef unsigned char	LByteType;
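
/*
 * A compile-time check along these lines would catch a violation of the
 * 8-bit requirement at build time (sketch only; assumes the CTASSERT()
 * macro from <sys/systm.h> is available in this kernel):
 *
 *	CTASSERT(sizeof(LByteType) == 1);
 */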
320 
321 #define	L15_STATE_SIZE	256
322 
323 static LByteType	L15_x, L15_y;
324 static LByteType	L15_start_x;
325 static LByteType	L15_state[L15_STATE_SIZE];
326 
327 /*
328  * PRIVATE FUNCS:
329  */
330 
331 static void		L15_Swap(const LByteType pos1, const LByteType pos2);
332 static void		L15_InitState(void);
333 static void		L15_KSA(const LByteType * const key,
334 				const size_t keyLen);
335 static void		L15_Discard(const LByteType numCalls);
336 
337 /*
338  * PUBLIC INTERFACE:
339  */
340 static void		L15(const LByteType * const key, const size_t keyLen);
341 static LByteType	L15_Byte(void);
342 static void		L15_Vector(const LByteType * const key,
343 				const size_t keyLen);
344 
345 static __inline void
346 L15_Swap(const LByteType pos1, const LByteType pos2)
347 {
348 	const LByteType	save1 = L15_state[pos1];
349 
350 	L15_state[pos1] = L15_state[pos2];
351 	L15_state[pos2] = save1;
352 }
353 
354 static void
355 L15_InitState (void)
356 {
357 	size_t i;
358 	for (i = 0; i < L15_STATE_SIZE; ++i)
359 		L15_state[i] = i;
360 }
361 
362 #define  L_SCHEDULE(xx)						\
363 								\
364 for (i = 0; i < L15_STATE_SIZE; ++i) {				\
365     L15_Swap(i, (stateIndex += (L15_state[i] + (xx))));		\
366 }
367 
368 static void
369 L15_KSA (const LByteType * const key, const size_t keyLen)
370 {
371 	size_t	i, keyIndex;
372 	static LByteType stateIndex = 0;
373 
374 	for (keyIndex = 0; keyIndex < keyLen; ++keyIndex) {
375 		L_SCHEDULE(key[keyIndex]);
376 	}
377 	L_SCHEDULE(keyLen);
378 }
379 
380 static void
381 L15_Discard(const LByteType numCalls)
382 {
383 	LByteType i;
384 	for (i = 0; i < numCalls; ++i) {
385 		(void)L15_Byte();
386 	}
387 }
388 
389 
390 /*
391  * PUBLIC INTERFACE:
392  */
393 static void
394 L15(const LByteType * const key, const size_t keyLen)
395 {
396 	L15_x = L15_start_x = 0;
397 	L15_y = L15_STATE_SIZE - 1;
398 	L15_InitState();
399 	L15_KSA(key, keyLen);
400 	L15_Discard(L15_Byte());
401 }
402 
403 static LByteType
404 L15_Byte(void)
405 {
406 	LByteType z;
407 
408 	L15_Swap(L15_state[L15_x], L15_y);
409 	z = (L15_state [L15_x++] + L15_state[L15_y--]);
410 	if (L15_x == L15_start_x) {
411 		--L15_y;
412 	}
413 	return (L15_state[z]);
414 }
415 
416 static void
417 L15_Vector (const LByteType * const key, const size_t keyLen)
418 {
419 	L15_KSA(key, keyLen);
420 }
421 
422 /*------------------------------- L15 ----------------------------------*/
423 
424 /************************************************************************
425  *				KERNEL INTERFACE			*
426  ************************************************************************
427  *
428  * By Robin J Carey, Matthew Dillon and Alex Hornung.
429  */
430 
431 static int rand_thread_value;
432 static void NANOUP_EVENT(void);
433 static thread_t rand_td;
434 static struct spinlock rand_spin;
435 
436 static int sysctl_kern_random(SYSCTL_HANDLER_ARGS);
437 
438 static int nrandevents;
439 static int rand_mode = 2;
440 static struct systimer systimer_rand;
441 
442 static int sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS);
443 
444 SYSCTL_INT(_kern, OID_AUTO, nrandevents, CTLFLAG_RD, &nrandevents, 0, "");
445 SYSCTL_PROC(_kern, OID_AUTO, random, CTLFLAG_RD | CTLFLAG_ANYBODY, 0, 0,
446 		sysctl_kern_random, "I", "Acquire random data");
447 SYSCTL_PROC(_kern, OID_AUTO, rand_mode, CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
448     sysctl_kern_rand_mode, "A", "RNG mode (csprng, ibaa or mixed)");
449 
450 
451 /*
452  * Called from early boot (pre-SMP)
453  */
454 void
455 rand_initialize(void)
456 {
457 	struct timespec	now;
458 	int i;
459 
460 	csprng_init(&csprng_state);
461 #if 0
462 	/*
463 	 * XXX: we do the reseeding when someone uses the RNG instead
464 	 * of regularly using init_reseed (which initializes a callout)
465 	 * to avoid unnecessary and regular reseeding.
466 	 */
467 	csprng_init_reseed(&csprng_state);
468 #endif
469 
470 
471 	spin_init(&rand_spin, "randinit");
472 
473 	/* Initialize IBAA. */
474 	IBAA_Init();
475 
476 	/* Initialize L15. */
477 	nanouptime(&now);
478 	L15((const LByteType *)&now.tv_nsec, sizeof(now.tv_nsec));
479 	for (i = 0; i < (SIZE / 2); ++i) {
480 		nanotime(&now);
481 		add_buffer_randomness_src((const uint8_t *)&now.tv_nsec,
482 		    sizeof(now.tv_nsec), RAND_SRC_TIMING);
483 		nanouptime(&now);
484 		add_buffer_randomness_src((const uint8_t *)&now.tv_nsec,
485 		    sizeof(now.tv_nsec), RAND_SRC_TIMING);
486 	}
487 
488 	/*
489 	 * Warm up the generator to get rid of weak initial states.
490 	 */
491 	for (i = 0; i < 10; ++i)
492 		IBAA_Call();
493 }
494 
495 /*
496  * Keyboard events
497  */
498 void
499 add_keyboard_randomness(u_char scancode)
500 {
501 	spin_lock(&rand_spin);
502 	L15_Vector((const LByteType *) &scancode, sizeof (scancode));
503 	spin_unlock(&rand_spin);
504 	add_interrupt_randomness(0);
505 }
506 
507 /*
508  * Interrupt events.  This is SMP safe and allowed to race.
509  *
510  * This adjusts rand_thread_value which will be incorporated into the next
 * time-buffered seed.  It does not affect the seeding period per se.
512  */
513 void
514 add_interrupt_randomness(int intr)
515 {
516 	if (tsc_present) {
517 		rand_thread_value = (rand_thread_value << 4) ^ 1 ^
518 		((int)rdtsc() % 151);
519 	}
520 	++rand_thread_value;				/* ~1 bit */
521 }
522 
523 /*
524  * True random number source
525  */
526 int
527 add_buffer_randomness(const char *buf, int bytes)
528 {
529 	spin_lock(&rand_spin);
530 	L15_Vector((const LByteType *)buf, bytes);
531 	IBAA_Vector(buf, bytes);
532 	spin_unlock(&rand_spin);
533 
534 	atomic_add_int(&nrandevents, 1);
535 
536 	csprng_add_entropy(&csprng_state, RAND_SRC_UNKNOWN,
537 	    (const uint8_t *)buf, bytes, 0);
538 
539 	return 0;
540 }
541 
542 
543 int
544 add_buffer_randomness_src(const char *buf, int bytes, int srcid)
545 {
546 	spin_lock(&rand_spin);
547 	L15_Vector((const LByteType *)buf, bytes);
548 	IBAA_Vector(buf, bytes);
549 	spin_unlock(&rand_spin);
550 
551 	atomic_add_int(&nrandevents, 1);
552 
553 	csprng_add_entropy(&csprng_state, srcid & 0xff,
554 	    (const uint8_t *)buf, bytes, 0);
555 
556 	return 0;
557 }
558 
559 
560 /*
561  * Kqueue filter (always succeeds)
562  */
563 int
564 random_filter_read(struct knote *kn, long hint)
565 {
566 	return (1);
567 }
568 
/*
 * Heavy weight random number generator.  May return less than the
 * requested number of bytes; callers that need the full amount must
 * check the return value and retry for the remainder.
 */
575 u_int
576 read_random(void *buf, u_int nbytes)
577 {
578 	int i, j;
579 
580 	if (rand_mode == 0) {
581 		/* Only use CSPRNG */
582 		i = csprng_get_random(&csprng_state, buf, nbytes, 0);
583 	} else if (rand_mode == 1) {
584 		/* Only use IBAA */
585 		spin_lock(&rand_spin);
586 		for (i = 0; i < nbytes; i++)
587 			((u_char *)buf)[i] = IBAA_Byte();
588 		spin_unlock(&rand_spin);
589 	} else {
590 		/* Mix both CSPRNG and IBAA */
591 		i = csprng_get_random(&csprng_state, buf, nbytes, 0);
592 		spin_lock(&rand_spin);
593 		for (j = 0; j < i; j++)
594 			((u_char *)buf)[j] ^= IBAA_Byte();
595 		spin_unlock(&rand_spin);
596 	}
597 
598 	add_interrupt_randomness(0);
599 	return (i > 0) ? i : 0;
600 }
601 
602 /*
603  * Heavy weight random number generator.  Must return the requested
604  * number of bytes.
605  */
606 u_int
607 read_random_unlimited(void *buf, u_int nbytes)
608 {
609 	u_int i;
610 
611 	spin_lock(&rand_spin);
612 	for (i = 0; i < nbytes; ++i)
613 		((u_char *)buf)[i] = IBAA_Byte();
614 	spin_unlock(&rand_spin);
615 	add_interrupt_randomness(0);
616 	return (i);
617 }
618 
619 /*
620  * Read random data via sysctl().
621  */
622 static
623 int
624 sysctl_kern_random(SYSCTL_HANDLER_ARGS)
625 {
626 	char buf[256];
627 	size_t n;
628 	size_t r;
629 	int error = 0;
630 
631 	n = req->oldlen;
632 	if (n > 1024 * 1024)
633 		n = 1024 * 1024;
634 	while (n > 0) {
635 		if ((r = n) > sizeof(buf))
636 			r = sizeof(buf);
637 		read_random_unlimited(buf, r);
638 		error = SYSCTL_OUT(req, buf, r);
639 		if (error)
640 			break;
641 		n -= r;
642 	}
643 	return(error);
644 }
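
/*
 * Userland sketch (assumes the standard sysctlbyname(3) interface): the
 * handler above streams read_random_unlimited() output into the request
 * buffer (capped at 1MB per request), so a process can pull random bytes
 * with e.g.
 *
 *	uint8_t buf[64];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctlbyname("kern.random", buf, &len, NULL, 0) == 0) {
 *		// len bytes of buf now hold random data
 *	}
 */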
645 
646 int
647 sys_getrandom(struct getrandom_args *uap)
648 {
649 	char buf[256];
650 	ssize_t bytes;
651 	ssize_t r;
652 	ssize_t n;
653 	int error;
654 	int sigcnt;
655 
656 	bytes = (ssize_t)uap->len;
657 	if (bytes < 0)
658 		return EINVAL;
659 
660 	r = 0;
661 	error = 0;
662 	sigcnt = 0;
663 
664 	while (r < bytes) {
665 		n = (ssize_t)sizeof(buf);
666 		if (n > bytes - r)
667 			n = bytes - r;
668 		read_random_unlimited(buf, n);
669 		error = copyout(buf, (char *)uap->buf + r, n);
670 		if (error)
671 			break;
672 		r += n;
673 		lwkt_user_yield();
674 		if (++sigcnt == 128) {
675 			sigcnt = 0;
676 			if (CURSIG_NOBLOCK(curthread->td_lwp) != 0) {
677 				error = EINTR;
678 				break;
679 			}
680 		}
681 	}
682 	if (error == 0)
683 		uap->sysmsg_szresult = r;
684 
685 	return error;
686 }
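
/*
 * Userland sketch (assumes a libc getrandom() wrapper with the usual
 * (buf, buflen, flags) signature backed by this syscall):
 *
 *	char key[32];
 *
 *	if (getrandom(key, sizeof(key), 0) == sizeof(key)) {
 *		// key now holds 32 bytes of random data
 *	}
 */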
687 
688 /*
689  * Change the random mode via sysctl().
690  */
691 static
692 const char *
693 rand_mode_to_str(int mode)
694 {
695 	switch (mode) {
696 	case 0:
697 		return "csprng";
698 	case 1:
699 		return "ibaa";
700 	case 2:
701 		return "mixed";
702 	default:
703 		return "unknown";
704 	}
705 }
706 
707 static
708 int
709 sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS)
710 {
711 	char mode[32];
712 	int error;
713 
714 	strncpy(mode, rand_mode_to_str(rand_mode), sizeof(mode)-1);
715 	error = sysctl_handle_string(oidp, mode, sizeof(mode), req);
716 	if (error || req->newptr == NULL)
717 	    return error;
718 
719 	if ((strncmp(mode, "csprng", sizeof(mode))) == 0)
720 		rand_mode = 0;
721 	else if ((strncmp(mode, "ibaa", sizeof(mode))) == 0)
722 		rand_mode = 1;
723 	else if ((strncmp(mode, "mixed", sizeof(mode))) == 0)
724 		rand_mode = 2;
725 	else
726 		error = EINVAL;
727 
728 	return error;
729 }
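
/*
 * Userland sketch (assumes sysctlbyname(3)): switch the generator to
 * CSPRNG-only output and read the active mode back:
 *
 *	char mode[32];
 *	size_t len = sizeof(mode);
 *
 *	sysctlbyname("kern.rand_mode", NULL, NULL, "csprng", sizeof("csprng"));
 *	sysctlbyname("kern.rand_mode", mode, &len, NULL, 0);
 */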
730 
731 /*
732  * Random number generator helper thread.  This limits code overhead from
733  * high frequency events by delaying the clearing of rand_thread_value.
734  *
735  * This is a time-buffered loop, with a randomizing delay.  Note that interrupt
 * entropy does not cause the thread to wake up any faster, but does improve the
737  * quality of the entropy produced.
738  */
739 static
740 void
741 rand_thread_loop(void *dummy)
742 {
743 	int64_t count;
744 
745 	for (;;) {
746 		/*
747 		 * Generate entropy.
748 		 */
749 		NANOUP_EVENT();
750 		spin_lock(&rand_spin);
751 		count = (uint8_t)L15_Byte();
752 		spin_unlock(&rand_spin);
753 
754 		/*
755 		 * Calculate 1/10 of a second to 2/10 of a second, fine-grained
		 * Calculate 1/10 of a second to 2/10 of a second, fine-grained
		 * using an L15_Byte() feedback.
		 *
		 * Go faster in the first 120 seconds after boot (see the
		 * time_uptime check below).  This affects the time-after-next
		 * interrupt (pipeline delay).
761 		count = sys_cputimer->freq * (count + 256) / (256 * 10);
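		/*
		 * Worked example of the scaling below, assuming a hypothetical
		 * sys_cputimer->freq of 1,000,000 ticks/sec:
		 *
		 *	count = 0   -> 1000000 * 256 / 2560 = 100000 ticks (0.1s)
		 *	count = 255 -> 1000000 * 511 / 2560 = 199609 ticks (~0.2s)
		 */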
762 		if (time_uptime < 120)
763 			count = count / 10 + 1;
764 		systimer_rand.periodic = count;
765 
766 		tsleep(rand_td, 0, "rwait", 0);
767 	}
768 }
769 
770 /*
771  * Systimer trigger - fine-grained random trigger
772  */
773 static
774 void
775 rand_thread_wakeup(struct systimer *timer, int in_ipi, struct intrframe *frame)
776 {
777 	wakeup(rand_td);
778 }
779 
780 static
781 void
782 rand_thread_init(void)
783 {
784 	systimer_init_periodic_nq(&systimer_rand, rand_thread_wakeup, NULL, 25);
785 	lwkt_create(rand_thread_loop, NULL, &rand_td, NULL, 0, 0, "random");
786 }
787 
788 SYSINIT(rand, SI_SUB_HELPER_THREADS, SI_ORDER_ANY, rand_thread_init, 0);
789 
790 /*
791  * Caller is time-buffered.  Incorporate any accumulated interrupt randomness
792  * as well as the high frequency bits of the TSC.
793  *
794  * A delta nanoseconds value is used to remove absolute time from the generated
795  * entropy.  Even though we are pushing 32 bits, this entropy is probably only
796  * good for one or two bits without any interrupt sources, and possibly 8 bits with.
797  */
798 static void
799 NANOUP_EVENT(void)
800 {
801 	static struct timespec	last;
802 	struct timespec		now;
803 	int			nsec;
804 
805 	/*
806 	 * Delta nanoseconds since last event
807 	 */
808 	nanouptime(&now);
809 	nsec = now.tv_nsec - last.tv_nsec;
810 	last = now;
811 
812 	/*
813 	 * Interrupt randomness.
814 	 */
815 	nsec ^= rand_thread_value;
816 
817 	/*
818 	 * The TSC, if present, generally has an even higher
819 	 * resolution.  Integrate a portion of it into our seed.
820 	 */
821 	if (tsc_present)
822 		nsec ^= (rdtsc() & 255) << 8;
823 
824 	/*
825 	 * Ok.
826 	 */
827 
828 	add_buffer_randomness_src((const uint8_t *)&nsec, sizeof(nsec), RAND_SRC_INTR);
829 }
830 
831