xref: /openbsd-src/sys/dev/rnd.c (revision 5054e3e78af0749a9bb00ba9a024b3ee2d90290f)
1 /*	$OpenBSD: rnd.c,v 1.101 2009/11/09 17:53:39 nicm Exp $	*/
2 
3 /*
4  * rnd.c -- A strong random number generator
5  *
6  * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
7  * Copyright (c) 2008 Damien Miller.
8  *
9  * Version 1.89, last modified 19-Sep-99
10  *
11  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, and the entire permission notice in its entirety,
19  *    including the disclaimer of warranties.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. The name of the author may not be used to endorse or promote
24  *    products derived from this software without specific prior
25  *    written permission.
26  *
27  * ALTERNATIVELY, this product may be distributed under the terms of
28  * the GNU Public License, in which case the provisions of the GPL are
29  * required INSTEAD OF the above restrictions.  (This clause is
30  * necessary due to a potential bad interaction between the GPL and
31  * the restrictions contained in a BSD-style copyright.)
32  *
33  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
34  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
36  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
37  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
38  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
39  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43  * OF THE POSSIBILITY OF SUCH DAMAGE.
44  */
45 
46 /*
47  * (now, with legal B.S. out of the way.....)
48  *
49  * This routine gathers environmental noise from device drivers, etc.,
50  * and returns good random numbers, suitable for cryptographic use.
51  * Besides the obvious cryptographic uses, these numbers are also good
52  * for seeding TCP sequence numbers, and other places where it is
53  * desirable to have numbers which are not only random, but hard to
54  * predict by an attacker.
55  *
56  * Theory of operation
57  * ===================
58  *
59  * Computers are very predictable devices.  Hence it is extremely hard
60  * to produce truly random numbers on a computer --- as opposed to
61  * pseudo-random numbers, which can be easily generated by using an
62  * algorithm.  Unfortunately, it is very easy for attackers to guess
63  * the sequence of pseudo-random number generators, and for some
64  * applications this is not acceptable.  Instead, we must try to
65  * gather "environmental noise" from the computer's environment, which
66  * must be hard for outside attackers to observe and use to
67  * generate random numbers.  In a Unix environment, this is best done
68  * from inside the kernel.
69  *
70  * Sources of randomness from the environment include inter-keyboard
71  * timings, inter-interrupt timings from some interrupts, and other
72  * events which are both (a) non-deterministic and (b) hard for an
73  * outside observer to measure.  Randomness from these sources is
74  * added to the "entropy pool", which is mixed using a CRC-like function.
75  * This is not cryptographically strong, but it is adequate assuming
76  * the randomness is not chosen maliciously, and it is fast enough that
77  * the overhead of doing it on every interrupt is very reasonable.
78  * As random bytes are mixed into the entropy pool, the routines keep
79  * an *estimate* of how many bits of randomness have been stored into
80  * the random number generator's internal state.
81  *
82  * When random bytes are desired, they are obtained by taking the MD5
83  * hash of the content of the entropy pool.  The MD5 hash avoids
84  * exposing the internal state of the entropy pool.  It is believed to
85  * be computationally infeasible to derive any useful information
86  * about the input of MD5 from its output.  Even if it is possible to
87  * analyze MD5 in some clever way, as long as the amount of data
88  * returned from the generator is less than the inherent entropy in
89  * the pool, the output data is totally unpredictable.  For this
90  * reason, the routine decreases its internal estimate of how many
91  * bits of "true randomness" are contained in the entropy pool as it
92  * outputs random numbers.
93  *
94  * If this estimate goes to zero, the routine can still generate
95  * random numbers; however, an attacker may (at least in theory) be
96  * able to infer the future output of the generator from prior
97  * outputs.  This requires successful cryptanalysis of MD5, which is
98  * believed to be not feasible, but there is a remote possibility.
99  * Nonetheless, these numbers should be useful for the vast majority
100  * of purposes.
101  *
102  * Exported interfaces ---- output
103  * ===============================
104  *
105  * There are three exported interfaces.
106  * The first set are designed to be used from within the kernel:
107  *
108  *	void get_random_bytes(void *buf, int nbytes);
109  *
110  * This interface will return the requested number of random bytes,
111  * and place it in the requested buffer.
112  *
113  * Two other interfaces are two character devices /dev/random and
114  * /dev/urandom.  /dev/random is suitable for use when very high
115  * quality randomness is desired (for example, for key generation or
116  * one-time pads), as it will only return a maximum of the number of
117  * bits of randomness (as estimated by the random number generator)
118  * contained in the entropy pool.
119  *
120  * The /dev/urandom device does not have this limit, and will return
 * as many bytes as were requested.  As more and more random bytes are
 * requested without giving time for the entropy pool to recharge,
123  * this will result in random numbers that are merely cryptographically
124  * strong.  For many applications, however, this is acceptable.
125  *
126  * Exported interfaces ---- input
127  * ==============================
128  *
129  * The current exported interfaces for gathering environmental noise
130  * from the devices are:
131  *
132  *	void add_true_randomness(int data);
133  *	void add_timer_randomness(int data);
134  *	void add_mouse_randomness(int mouse_data);
135  *	void add_net_randomness(int isr);
136  *	void add_tty_randomness(int c);
137  *	void add_disk_randomness(int n);
138  *	void add_audio_randomness(int n);
139  *	void add_video_randomness(int n);
140  *
141  * add_true_randomness() uses true random number generators present
142  * on some cryptographic and system chipsets.  Entropy accounting
 * is not equitable, no timing is done, supplied 32 bits of pure entropy
144  * are hashed into the pool plain and blindly, increasing the counter.
145  *
 * add_timer_randomness() uses the random driver's own timing,
 * measuring extract_entropy() and rndioctl() execution times.
148  *
149  * add_mouse_randomness() uses the mouse interrupt timing, as well as
150  * the reported position of the mouse from the hardware.
151  *
152  * add_net_randomness() times the finishing time of net input.
153  *
154  * add_tty_randomness() uses the inter-keypress timing, as well as the
155  * character as random inputs into the entropy pool.
156  *
157  * add_disk_randomness() times the finishing time of disk requests as well
158  * as feeding both xfer size & time into the entropy pool.
159  *
160  * add_audio_randomness() times the finishing of audio codec dma
161  * requests for both recording and playback, apparently supplies quite
162  * a lot of entropy. I'd blame it on low resolution audio clock generators.
163  *
164  * All of these routines (except for add_true_randomness() of course)
165  * try to estimate how many bits of randomness are in a particular
166  * randomness source.  They do this by keeping track of the first and
167  * second order deltas of the event timings.
168  *
169  * Ensuring unpredictability at system startup
170  * ============================================
171  *
172  * When any operating system starts up, it will go through a sequence
173  * of actions that are fairly predictable by an adversary, especially
174  * if the start-up does not involve interaction with a human operator.
175  * This reduces the actual number of bits of unpredictability in the
176  * entropy pool below the value in entropy_count.  In order to
177  * counteract this effect, it helps to carry information in the
178  * entropy pool across shut-downs and start-ups.  To do this, put the
179  * following lines in appropriate script which is run during the boot
180  * sequence:
181  *
182  *	echo "Initializing random number generator..."
183  *	# Carry a random seed from start-up to start-up
184  *	# Load and then save 512 bytes, which is the size of the entropy pool
185  *	if [ -f /etc/random-seed ]; then
186  *		cat /etc/random-seed >/dev/urandom
187  *	fi
188  *	dd if=/dev/urandom of=/etc/random-seed count=1
189  *
190  * and the following lines in appropriate script which is run when
191  * the system is shutting down:
192  *
193  *	# Carry a random seed from shut-down to start-up
194  *	# Save 512 bytes, which is the size of the entropy pool
195  *	echo "Saving random seed..."
196  *	dd if=/dev/urandom of=/etc/random-seed count=1
197  *
198  * For example, on OpenBSD systems, the appropriate scripts are
199  * usually /etc/rc.local and /etc/rc.shutdown, respectively.
200  *
201  * Effectively, these commands cause the contents of the entropy pool
202  * to be saved at shutdown time and reloaded into the entropy pool at
203  * start-up.  (The 'dd' in the addition to the bootup script is to
204  * make sure that /etc/random-seed is different for every start-up,
205  * even if the system crashes without executing rc.shutdown) Even with
206  * complete knowledge of the start-up activities, predicting the state
207  * of the entropy pool requires knowledge of the previous history of
208  * the system.
209  *
210  * Configuring the random(4) driver under OpenBSD
211  * ==============================================
212  *
213  * The special files for the random(4) driver should have been created
214  * during the installation process.  However, if your system does not have
215  * /dev/random and /dev/[s|u|p|a]random created already, they can be created
216  * by using the MAKEDEV(8) script in /dev:
217  *
218  *	/dev/MAKEDEV random
219  *
220  * Check MAKEDEV for information about major and minor numbers.
221  *
222  * Acknowledgements:
223  * =================
224  *
225  * Ideas for constructing this random number generator were derived
226  * from Pretty Good Privacy's random number generator, and from private
227  * discussions with Phil Karn.  Colin Plumb provided a faster random
228  * number generator, which speeds up the mixing function of the entropy
229  * pool, taken from PGPfone.  Dale Worley has also contributed many
230  * useful ideas and suggestions to improve this driver.
231  *
232  * Any flaws in the design are solely my responsibility, and should
233  * not be attributed to the Phil, Colin, or any of the authors of PGP.
234  *
235  * Further background information on this topic may be obtained from
236  * RFC 1750, "Randomness Recommendations for Security", by Donald
237  * Eastlake, Steve Crocker, and Jeff Schiller.
238  */
239 
240 #include <sys/param.h>
241 #include <sys/endian.h>
242 #include <sys/systm.h>
243 #include <sys/conf.h>
244 #include <sys/disk.h>
245 #include <sys/limits.h>
246 #include <sys/ioctl.h>
247 #include <sys/malloc.h>
248 #include <sys/fcntl.h>
249 #include <sys/vnode.h>
250 #include <sys/sysctl.h>
251 #include <sys/timeout.h>
252 #include <sys/poll.h>
253 #include <sys/mutex.h>
254 #include <sys/msgbuf.h>
255 
256 #include <crypto/md5.h>
257 #include <crypto/arc4.h>
258 
259 #include <dev/rndvar.h>
260 #include <dev/rndioctl.h>
261 
262 #ifdef	RNDEBUG
263 int	rnd_debug = 0x0000;
264 #define	RD_INPUT	0x000f	/* input data */
265 #define	RD_OUTPUT	0x00f0	/* output data */
266 #define	RD_WAIT		0x0100	/* sleep/wakeup for good data */
267 #endif
268 
269 /*
270  * Master random number pool functions
271  * -----------------------------------
272  */
273 
274 /*
275  * For the purposes of better mixing, we use the CRC-32 polynomial as
276  * well to make a twisted Generalized Feedback Shift Register
277  *
278  * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
279  * Transactions on Modeling and Computer Simulation 2(3):179-194.
280  * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
 * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
282  *
283  * Thanks to Colin Plumb for suggesting this.
284  *
285  * We have not analyzed the resultant polynomial to prove it primitive;
286  * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
287  * of a random large-degree polynomial over GF(2) are more than large enough
288  * that periodicity is not a concern.
289  *
290  * The input hash is much less sensitive than the output hash.  All
291  * we want from it is to be a good non-cryptographic hash -
292  * i.e. to not produce collisions when fed "random" data of the sort
293  * we expect to see.  As long as the pool state differs for different
294  * inputs, we have preserved the input entropy and done a good job.
295  * The fact that an intelligent attacker can construct inputs that
296  * will produce controlled alterations to the pool's state is not
297  * important because we don't consider such inputs to contribute any
298  * randomness.  The only property we need with respect to them is that
299  * the attacker can't increase his/her knowledge of the pool's state.
300  * Since all additions are reversible (knowing the final state and the
301  * input, you can reconstruct the initial state), if an attacker has
302  * any uncertainty about the initial state, he/she can only shuffle
303  * that uncertainty about, but never cause any collisions (which would
304  * decrease the uncertainty).
305  *
306  * The chosen system lets the state of the pool be (essentially) the input
307  * modulo the generator polynomial.  Now, for random primitive polynomials,
308  * this is a universal class of hash functions, meaning that the chance
309  * of a collision is limited by the attacker's knowledge of the generator
310  * polynomial, so if it is chosen at random, an attacker can never force
311  * a collision.  Here, we use a fixed polynomial, but we *can* assume that
312  * ###--> it is unknown to the processes generating the input entropy. <-###
313  * Because of this important property, this is a good, collision-resistant
314  * hash; hash collisions will occur no more often than chance.
315  */
316 
317 /*
318  * Stirring polynomials over GF(2) for various pool sizes. Used in
319  * add_entropy_words() below.
320  *
 * The polynomial terms are chosen to be evenly spaced (minimum RMS
 * distance from evenly spaced), except for the last tap, which is 1 to
 * get the twisting happening as fast as possible.
 *
 * The resultant polynomial is:
 *   2^POOLWORDS + 2^POOL_TAP1 + 2^POOL_TAP2 + 2^POOL_TAP3 + 2^POOL_TAP4 + 1
327  */
328 #define POOLBITS	(POOLWORDS*32)
329 #define POOLBYTES	(POOLWORDS*4)
330 #define POOLMASK	(POOLWORDS - 1)
331 #if POOLWORDS == 2048
332 #define	POOL_TAP1	1638
333 #define	POOL_TAP2	1231
334 #define	POOL_TAP3	819
335 #define	POOL_TAP4	411
336 #elif POOLWORDS == 1024	/* also (819, 616, 410, 207, 2) */
337 #define	POOL_TAP1	817
338 #define	POOL_TAP2	615
339 #define	POOL_TAP3	412
340 #define	POOL_TAP4	204
341 #elif POOLWORDS == 512	/* also (409,307,206,102,2), (409,309,205,103,2) */
342 #define	POOL_TAP1	411
343 #define	POOL_TAP2	308
344 #define	POOL_TAP3	208
345 #define	POOL_TAP4	104
346 #elif POOLWORDS == 256
347 #define	POOL_TAP1	205
348 #define	POOL_TAP2	155
349 #define	POOL_TAP3	101
350 #define	POOL_TAP4	52
351 #elif POOLWORDS == 128	/* also (103, 78, 51, 27, 2) */
352 #define	POOL_TAP1	103
353 #define	POOL_TAP2	76
354 #define	POOL_TAP3	51
355 #define	POOL_TAP4	25
356 #elif POOLWORDS == 64
357 #define	POOL_TAP1	52
358 #define	POOL_TAP2	39
359 #define	POOL_TAP3	26
360 #define	POOL_TAP4	14
361 #elif POOLWORDS == 32
362 #define	POOL_TAP1	26
363 #define	POOL_TAP2	20
364 #define	POOL_TAP3	14
365 #define	POOL_TAP4	7
366 #else
367 #error No primitive polynomial available for chosen POOLWORDS
368 #endif
369 
370 static void dequeue_randomness(void *);
371 
/* Master kernel random number pool. */
struct random_bucket {
	u_int	add_ptr;	/* next pool slot to mix into (counts down) */
	u_int	entropy_count;	/* estimated bits of entropy in the pool */
	u_char	input_rotate;	/* current input rotation amount, 0-31 */
	u_int32_t pool[POOLWORDS];	/* the entropy pool itself */
	u_int	asleep;		/* sleepers waiting for entropy to arrive */
	u_int	tmo;		/* nonzero while rnd_timeout is pending */
};
struct random_bucket random_state;
struct mutex rndlock;	/* protects random_state, arc4 state, event queue */
383 
384 /*
385  * This function adds a byte into the entropy pool.  It does not
386  * update the entropy estimate.  The caller must do this if appropriate.
387  *
388  * The pool is stirred with a polynomial of degree POOLWORDS over GF(2);
389  * see POOL_TAP[1-4] above
390  *
391  * Rotate the input word by a changing number of bits, to help assure
392  * that all bits in the entropy get toggled.  Otherwise, if the pool
393  * is consistently feed small numbers (such as keyboard scan codes)
394  * then the upper bits of the entropy pool will frequently remain
395  * untouched.
396  */
397 static void
398 add_entropy_words(const u_int32_t *buf, u_int n)
399 {
400 	/* derived from IEEE 802.3 CRC-32 */
401 	static const u_int32_t twist_table[8] = {
402 		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
403 		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
404 	};
405 
406 	for (; n--; buf++) {
407 		u_int32_t w = (*buf << random_state.input_rotate) |
408 		    (*buf >> (32 - random_state.input_rotate));
409 		u_int i = random_state.add_ptr =
410 		    (random_state.add_ptr - 1) & POOLMASK;
411 		/*
412 		 * Normally, we add 7 bits of rotation to the pool.
413 		 * At the beginning of the pool, add an extra 7 bits
414 		 * rotation, so that successive passes spread the
415 		 * input bits across the pool evenly.
416 		 */
417 		random_state.input_rotate =
418 		    (random_state.input_rotate + (i ? 7 : 14)) & 31;
419 
420 		/* XOR pool contents corresponding to polynomial terms */
421 		w ^= random_state.pool[(i + POOL_TAP1) & POOLMASK] ^
422 		     random_state.pool[(i + POOL_TAP2) & POOLMASK] ^
423 		     random_state.pool[(i + POOL_TAP3) & POOLMASK] ^
424 		     random_state.pool[(i + POOL_TAP4) & POOLMASK] ^
425 		     random_state.pool[(i + 1) & POOLMASK] ^
426 		     random_state.pool[i]; /* + 2^POOLWORDS */
427 
428 		random_state.pool[i] = (w >> 3) ^ twist_table[w & 7];
429 	}
430 }
431 
/*
 * This function extracts randomness from the entropy pool, and
 * returns it in a buffer.  This function computes how many remaining
 * bits of entropy are left in the pool, but it does not restrict the
 * number of bytes that are actually obtained.
 */
static void
extract_entropy(u_int8_t *buf, int nbytes)
{
	struct random_bucket *rs = &random_state;
	u_char buffer[16];	/* holds one MD5 digest */
	MD5_CTX tmp;
	u_int i;

	/* The extraction itself is a timed, entropy-bearing event. */
	add_timer_randomness(nbytes);

	while (nbytes) {
		/* Serve at most half a digest (8 bytes) per iteration. */
		if (nbytes < sizeof(buffer) / 2)
			i = nbytes;
		else
			i = sizeof(buffer) / 2;

		/* Hash the pool to get the output */
		MD5Init(&tmp);
		mtx_enter(&rndlock);
		MD5Update(&tmp, (u_int8_t*)rs->pool, sizeof(rs->pool));
		/* Debit the entropy estimate for the bytes served. */
		if (rs->entropy_count / 8 > i)
			rs->entropy_count -= i * 8;
		else
			rs->entropy_count = 0;
		mtx_leave(&rndlock);
		MD5Final(buffer, &tmp);

		/*
		 * In case the hash function has some recognizable
		 * output pattern, we fold it in half.
		 */
		buffer[0] ^= buffer[15];
		buffer[1] ^= buffer[14];
		buffer[2] ^= buffer[13];
		buffer[3] ^= buffer[12];
		buffer[4] ^= buffer[11];
		buffer[5] ^= buffer[10];
		buffer[6] ^= buffer[ 9];
		buffer[7] ^= buffer[ 8];

		/* Copy data to destination buffer */
		bcopy(buffer, buf, i);
		nbytes -= i;
		buf += i;

		/* Modify pool so next hash will produce different results */
		add_timer_randomness(nbytes);
		dequeue_randomness(&random_state);
	}

	/* Wipe data from memory */
	bzero(&tmp, sizeof(tmp));
	bzero(buffer, sizeof(buffer));
}
492 
493 /*
494  * Kernel-side entropy crediting API and handling of entropy-bearing events
495  * ------------------------------------------------------------------------
496  */
497 
498 /* pIII/333 reported to have some drops w/ these numbers */
499 #define QEVLEN (1024 / sizeof(struct rand_event))
500 #define QEVSLOW (QEVLEN * 3 / 4) /* yet another 0.75 for 60-minutes hour /-; */
501 #define QEVSBITS 10
502 
/* There is one of these per entropy source */
struct timer_rand_state {
	u_int	last_time;	/* timestamp of the previous event */
	u_int	last_delta;	/* previous first-order delta */
	u_int	last_delta2;	/* previous second-order delta */
	u_int	dont_count_entropy : 1;	/* never estimate entropy bits */
	u_int	max_entropy : 1;	/* credit maximum bits per event */
};

/* A queued entropy-bearing event awaiting dequeue_randomness(). */
struct rand_event {
	struct timer_rand_state *re_state;	/* originating source state */
	u_int re_nbits;		/* estimated entropy bits carried */
	u_int re_time;		/* scaled event timestamp */
	u_int re_val;		/* event value (scan code, size, ...) */
};

struct timer_rand_state rnd_states[RND_SRC_NUM];
struct rand_event rnd_event_space[QEVLEN];	/* ring buffer of events */
struct rand_event *rnd_event_head = rnd_event_space;	/* producer side */
struct rand_event *rnd_event_tail = rnd_event_space;	/* consumer side */
struct timeout rnd_timeout;	/* fires dequeue_randomness() */
struct selinfo rnd_rsel;	/* select/poll wakeup state for readers */
struct rndstats rndstats;	/* driver statistics counters */
int rnd_attached;		/* set once randomattach() has completed */
527 
528 /* must be called at a proper spl, returns ptr to the next event */
529 static __inline struct rand_event *
530 rnd_get(void)
531 {
532 	struct rand_event *p = rnd_event_tail;
533 
534 	if (p == rnd_event_head)
535 		return NULL;
536 
537 	if (p + 1 >= &rnd_event_space[QEVLEN])
538 		rnd_event_tail = rnd_event_space;
539 	else
540 		rnd_event_tail++;
541 
542 	return p;
543 }
544 
545 /* must be called at a proper spl, returns next available item */
546 static __inline struct rand_event *
547 rnd_put(void)
548 {
549 	struct rand_event *p = rnd_event_head + 1;
550 
551 	if (p >= &rnd_event_space[QEVLEN])
552 		p = rnd_event_space;
553 
554 	if (p == rnd_event_tail)
555 		return NULL;
556 
557 	return rnd_event_head = p;
558 }
559 
560 /* must be called at a proper spl, returns number of items in the queue */
561 static __inline int
562 rnd_qlen(void)
563 {
564 	int len = rnd_event_head - rnd_event_tail;
565 	return (len < 0)? -len : len;
566 }
567 
568 /*
569  * This function adds entropy to the entropy pool by using timing
570  * delays.  It uses the timer_rand_state structure to make an estimate
571  * of how many bits of entropy this call has added to the pool.
572  *
573  * The number "val" is also added to the pool - it should somehow describe
574  * the type of event which just happened.  Currently the values of 0-255
575  * are for keyboard scan codes, 256 and upwards - for interrupts.
576  * On the i386, this is assumed to be at most 16 bits, and the high bits
577  * are used for a high-resolution timer.
578  */
579 void
580 enqueue_randomness(int state, int val)
581 {
582 	struct timer_rand_state *p;
583 	struct rand_event *rep;
584 	struct timespec	tv;
585 	u_int	time, nbits;
586 
587 #ifdef DIAGNOSTIC
588 	if (state < 0 || state >= RND_SRC_NUM)
589 		return;
590 #endif
591 
592 	p = &rnd_states[state];
593 	val += state << 13;
594 
595 	if (!rnd_attached) {
596 		if ((rep = rnd_put()) == NULL) {
597 			rndstats.rnd_drops++;
598 			return;
599 		}
600 
601 		rep->re_state = &rnd_states[RND_SRC_TIMER];
602 		rep->re_nbits = 0;
603 		rep->re_time = 0;
604 		rep->re_time = val;
605 		return;
606 	}
607 
608 	nanotime(&tv);
609 	time = (tv.tv_nsec >> 10) + (tv.tv_sec << 20);
610 	nbits = 0;
611 
612 	/*
613 	 * Calculate the number of bits of randomness that we probably
614 	 * added.  We take into account the first and second order
615 	 * deltas in order to make our estimate.
616 	 */
617 	if (!p->dont_count_entropy) {
618 		int	delta, delta2, delta3;
619 		delta  = time   - p->last_time;
620 		delta2 = delta  - p->last_delta;
621 		delta3 = delta2 - p->last_delta2;
622 
623 		if (delta < 0) delta = -delta;
624 		if (delta2 < 0) delta2 = -delta2;
625 		if (delta3 < 0) delta3 = -delta3;
626 		if (delta > delta2) delta = delta2;
627 		if (delta > delta3) delta = delta3;
628 		delta3 = delta >>= 1;
629 		/*
630 		 * delta &= 0xfff;
631 		 * we don't do it since our time sheet is different from linux
632 		 */
633 
634 		if (delta & 0xffff0000) {
635 			nbits = 16;
636 			delta >>= 16;
637 		}
638 		if (delta & 0xff00) {
639 			nbits += 8;
640 			delta >>= 8;
641 		}
642 		if (delta & 0xf0) {
643 			nbits += 4;
644 			delta >>= 4;
645 		}
646 		if (delta & 0xc) {
647 			nbits += 2;
648 			delta >>= 2;
649 		}
650 		if (delta & 2) {
651 			nbits += 1;
652 			delta >>= 1;
653 		}
654 		if (delta & 1)
655 			nbits++;
656 
657 		/*
658 		 * the logic is to drop low-entropy entries,
659 		 * in hope for dequeuing to be more randomfull
660 		 */
661 		if (rnd_qlen() > QEVSLOW && nbits < QEVSBITS) {
662 			rndstats.rnd_drople++;
663 			return;
664 		}
665 		p->last_time = time;
666 		p->last_delta  = delta3;
667 		p->last_delta2 = delta2;
668 	} else if (p->max_entropy)
669 		nbits = 8 * sizeof(val) - 1;
670 
671 	/* given the multi-order delta logic above, this should never happen */
672 	if (nbits >= 32)
673 		return;
674 
675 	mtx_enter(&rndlock);
676 	if ((rep = rnd_put()) == NULL) {
677 		rndstats.rnd_drops++;
678 		mtx_leave(&rndlock);
679 		return;
680 	}
681 
682 	rep->re_state = p;
683 	rep->re_nbits = nbits;
684 	rep->re_time = tv.tv_nsec ^ (tv.tv_sec << 20);
685 	rep->re_val = val;
686 
687 	rndstats.rnd_enqs++;
688 	rndstats.rnd_ed[nbits]++;
689 	rndstats.rnd_sc[state]++;
690 	rndstats.rnd_sb[state] += nbits;
691 
692 	if (rnd_qlen() > QEVSLOW/2 && !random_state.tmo) {
693 		random_state.tmo++;
694 		timeout_add(&rnd_timeout, 1);
695 	}
696 	mtx_leave(&rndlock);
697 }
698 
/*
 * Drain the event queue, mixing each queued event into the pool and
 * crediting its estimated entropy.  Runs from rnd_timeout and is also
 * called directly by extract_entropy().
 */
static void
dequeue_randomness(void *v)
{
	struct random_bucket *rs = v;
	struct rand_event *rep;
	u_int32_t buf[2];
	u_int nbits;

	timeout_del(&rnd_timeout);
	rndstats.rnd_deqs++;

	mtx_enter(&rndlock);
	while ((rep = rnd_get())) {

		/* Copy the event out, then drop the lock for the mixing. */
		buf[0] = rep->re_time;
		buf[1] = rep->re_val;
		nbits = rep->re_nbits;
		mtx_leave(&rndlock);

		add_entropy_words(buf, 2);

		/* Credit the entropy estimate, capped at the pool size. */
		rndstats.rnd_total += nbits;
		rs->entropy_count += nbits;
		if (rs->entropy_count > POOLBITS)
			rs->entropy_count = POOLBITS;

		/* Rouse one sleeper once a little entropy has gathered. */
		if (rs->asleep && rs->entropy_count > 8) {
#ifdef	RNDEBUG
			if (rnd_debug & RD_WAIT)
				printf("rnd: wakeup[%u]{%u}\n",
				    rs->asleep,
				    rs->entropy_count);
#endif
			rs->asleep--;
			wakeup((void *)&rs->asleep);
			selwakeup(&rnd_rsel);
		}

		mtx_enter(&rndlock);
	}

	rs->tmo = 0;
	mtx_leave(&rndlock);
}
743 
744 /*
745  * Exported kernel CPRNG API: arc4random() and friends
746  * ---------------------------------------------------
747  */
748 
749 /*
750  * Maximum number of bytes to serve directly from the main arc4random
751  * pool. Larger requests are served from discrete arc4 instances keyed
752  * from the main pool.
753  */
754 #define ARC4_MAIN_MAX_BYTES	2048
755 
756 /*
757  * Key size (in bytes) for arc4 instances setup to serve requests larger
758  * than ARC4_MAIN_MAX_BYTES.
759  */
760 #define ARC4_SUB_KEY_BYTES	(256 / 8)
761 
struct timeout arc4_timeout;		/* fires arc4_reinit() to force a restir */
struct rc4_ctx arc4random_state;	/* the main kernel RC4 stream */
int arc4random_initialized;		/* zero forces a restir on next access */
u_long arc4random_count = 0;		/* bytes drawn since the last (re)key */
766 
767 /*
768  * This function returns some number of good random numbers but is quite
769  * slow. Please use arc4random_buf() instead unless very high quality
770  * randomness is required.
771  * XXX: rename this
772  */
773 void
774 get_random_bytes(void *buf, size_t nbytes)
775 {
776 	extract_entropy((u_int8_t *) buf, nbytes);
777 	rndstats.rnd_used += nbytes * 8;
778 }
779 
/*
 * (Re)key the main arc4 stream from the current time plus pool output.
 * NOTE(review): the seed buffer buf is not wiped before returning —
 * consider zeroing it like arc4random_buf_large() does for its key.
 */
static void
arc4_stir(void)
{
	u_int8_t buf[256];
	int len;

	/* Seed material: a timestamp followed by pool output. */
	nanotime((struct timespec *) buf);
	len = sizeof(buf) - sizeof(struct timespec);
	get_random_bytes(buf + sizeof (struct timespec), len);
	len += sizeof(struct timespec);

	mtx_enter(&rndlock);
	/* On a re-stir, fold the old key stream into the seed as well. */
	if (rndstats.arc4_nstirs > 0)
		rc4_crypt(&arc4random_state, buf, buf, sizeof(buf));

	rc4_keysetup(&arc4random_state, buf, sizeof(buf));
	arc4random_count = 0;
	rndstats.arc4_stirs += len;
	rndstats.arc4_nstirs++;

	/*
	 * Throw away the first N words of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 256 in our case.)
	 */
	rc4_skip(&arc4random_state, 256 * 4);
	mtx_leave(&rndlock);
}
808 
/*
 * called by timeout to mark arc4 for stirring,
 * actual stirring happens on any access attempt.
 */
static void
arc4_reinit(void *v)
{
	/* A cleared flag makes arc4maybeinit() restir on the next use. */
	arc4random_initialized = 0;
}
818 
819 static void
820 arc4maybeinit(void)
821 {
822 
823 	if (!arc4random_initialized) {
824 #ifdef DIAGNOSTIC
825 		if (!rnd_attached)
826 			panic("arc4maybeinit: premature");
827 #endif
828 		arc4random_initialized++;
829 		arc4_stir();
830 		/* 10 minutes, per dm@'s suggestion */
831 		timeout_add_sec(&arc4_timeout, 10 * 60);
832 	}
833 }
834 
/*
 * Driver attach: set up the timeouts, pool state, per-source flags and
 * the mutex, then seed the pool from the kernel message buffer.
 * Guards against being called a second time.
 */
void
randomattach(void)
{
	if (rnd_attached) {
#ifdef RNDEBUG
		printf("random: second attach\n");
#endif
		return;
	}

	timeout_set(&rnd_timeout, dequeue_randomness, &random_state);
	timeout_set(&arc4_timeout, arc4_reinit, NULL);

	random_state.add_ptr = 0;
	random_state.entropy_count = 0;
	/* The driver's own timing events never earn entropy credit. */
	rnd_states[RND_SRC_TIMER].dont_count_entropy = 1;
	/* Hardware RNG events get full credit without delta estimation. */
	rnd_states[RND_SRC_TRUE].dont_count_entropy = 1;
	rnd_states[RND_SRC_TRUE].max_entropy = 1;

	mtx_init(&rndlock, IPL_HIGH);
	/* Force the arc4 stream to be keyed on first use. */
	arc4_reinit(NULL);

	/* Mix in any surviving message buffer as an initial seed. */
	if (msgbufp && msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
	rnd_attached = 1;
}
862 
863 /* Return one word of randomness from an RC4 generator */
864 u_int32_t
865 arc4random(void)
866 {
867 	u_int32_t ret;
868 
869 	arc4maybeinit();
870 	mtx_enter(&rndlock);
871 	rc4_getbytes(&arc4random_state, (u_char*)&ret, sizeof(ret));
872 	rndstats.arc4_reads += sizeof(ret);
873 	arc4random_count += sizeof(ret);
874 	mtx_leave(&rndlock);
875 	return ret;
876 }
877 
/*
 * Return a "large" buffer of randomness using an independently-keyed RC4
 * generator.
 */
static void
arc4random_buf_large(void *buf, size_t n)
{
	u_char lbuf[ARC4_SUB_KEY_BYTES];
	struct rc4_ctx lctx;

	/* Pull a fresh sub-key from the main generator. */
	mtx_enter(&rndlock);
	rc4_getbytes(&arc4random_state, lbuf, sizeof(lbuf));
	rndstats.arc4_reads += n;
	arc4random_count += sizeof(lbuf);
	mtx_leave(&rndlock);

	/* Key a throwaway context; skip the early (FMS-weak) output. */
	rc4_keysetup(&lctx, lbuf, sizeof(lbuf));
	rc4_skip(&lctx, 256 * 4);
	rc4_getbytes(&lctx, (u_char*)buf, n);
	/* Wipe the sub-key and the temporary context from the stack. */
	bzero(lbuf, sizeof(lbuf));
	bzero(&lctx, sizeof(lctx));
}
900 
901 /*
902  * Fill a buffer of arbitrary length with RC4-derived randomness.
903  */
904 void
905 arc4random_buf(void *buf, size_t n)
906 {
907 	arc4maybeinit();
908 
909 	/* Satisfy large requests via an independent ARC4 instance */
910 	if (n > ARC4_MAIN_MAX_BYTES) {
911 		arc4random_buf_large(buf, n);
912 		return;
913 	}
914 
915 	mtx_enter(&rndlock);
916 	rc4_getbytes(&arc4random_state, (u_char*)buf, n);
917 	rndstats.arc4_reads += n;
918 	arc4random_count += n;
919 	mtx_leave(&rndlock);
920 }
921 
922 /*
923  * Calculate a uniformly distributed random number less than upper_bound
924  * avoiding "modulo bias".
925  *
926  * Uniformity is achieved by generating new random numbers until the one
927  * returned is outside the range [0, 2**32 % upper_bound).  This
928  * guarantees the selected random number will be inside
929  * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
930  * after reduction modulo upper_bound.
931  */
932 u_int32_t
933 arc4random_uniform(u_int32_t upper_bound)
934 {
935 	u_int32_t r, min;
936 
937 	if (upper_bound < 2)
938 		return 0;
939 
940 #if (ULONG_MAX > 0xffffffffUL)
941 	min = 0x100000000UL % upper_bound;
942 #else
943 	/* Calculate (2**32 % upper_bound) avoiding 64-bit math */
944 	if (upper_bound > 0x80000000)
945 		min = 1 + ~upper_bound;		/* 2**32 - upper_bound */
946 	else {
947 		/* (2**32 - x) % x == 2**32 % x when x <= 2**31 */
948 		min = ((0xffffffff - upper_bound) + 1) % upper_bound;
949 	}
950 #endif
951 
952 	/*
953 	 * This could theoretically loop forever but each retry has
954 	 * p > 0.5 (worst case, usually far better) of selecting a
955 	 * number inside the range we need, so it should rarely need
956 	 * to re-roll.
957 	 */
958 	for (;;) {
959 		r = arc4random();
960 		if (r >= min)
961 			break;
962 	}
963 
964 	return r % upper_bound;
965 }
966 
967 /*
968  * random, srandom, urandom, arandom char devices
969  * -------------------------------------------------------
970  */
971 
/* kqueue filter hooks for the read direction. */
void filt_rndrdetach(struct knote *kn);
int filt_rndread(struct knote *kn, long hint);

/* NOTE(review): the leading 1 is presumably the f_isfd flag -- confirm
 * against struct filterops in <sys/event.h>.  No attach hook is needed;
 * randomkqfilter() inserts knotes directly. */
struct filterops rndread_filtops =
	{ 1, NULL, filt_rndrdetach, filt_rndread};

/* kqueue filter hooks for the write direction. */
void filt_rndwdetach(struct knote *kn);
int filt_rndwrite(struct knote *kn, long hint);

struct filterops rndwrite_filtops =
	{ 1, NULL, filt_rndwdetach, filt_rndwrite};

/* select/kqueue bookkeeping for writers. */
struct selinfo rnd_wsel;
985 
986 int
987 randomopen(dev_t dev, int flag, int mode, struct proc *p)
988 {
989 	return (minor (dev) < RND_NODEV) ? 0 : ENXIO;
990 }
991 
/*
 * Close a random device node.  Nothing to tear down; always succeeds.
 */
int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
997 
/*
 * Read from the random devices.  Behavior depends on the minor:
 *   RND_RND      - would read a hardware RNG directly; none -> EIO.
 *   RND_SRND     - blocking strong randomness: sleeps until at least
 *                  128 bits of entropy are estimated, and never
 *                  returns more bytes than the estimate allows.
 *   RND_URND     - non-blocking pool output via get_random_bytes().
 *   RND_ARND(_OLD) - arc4 keystream output.
 * Output is produced in POOLBYTES chunks and copied out with uiomove().
 */
int
randomread(dev_t dev, struct uio *uio, int ioflag)
{
	int		ret = 0;
	u_int32_t 	*buf;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (!ret && uio->uio_resid > 0) {
		int	n = min(POOLBYTES, uio->uio_resid);

		switch(minor(dev)) {
		case RND_RND:
			ret = EIO;	/* no chip -- error */
			break;
		case RND_SRND:
			/* Block (unless non-blocking I/O was requested)
			 * until the pool holds at least 16*8 = 128 bits. */
			if (random_state.entropy_count < 16 * 8) {
				if (ioflag & IO_NDELAY) {
					ret = EWOULDBLOCK;
					break;
				}
#ifdef	RNDEBUG
				if (rnd_debug & RD_WAIT)
					printf("rnd: sleep[%u]\n",
					    random_state.asleep);
#endif
				random_state.asleep++;
				rndstats.rnd_waits++;
				/* PCATCH: a signal aborts the read. */
				ret = tsleep(&random_state.asleep,
				    PWAIT | PCATCH, "rndrd", 0);
#ifdef	RNDEBUG
				if (rnd_debug & RD_WAIT)
					printf("rnd: awakened(%d)\n", ret);
#endif
				if (ret)
					break;
			}
			/* Cap output at the estimated entropy (bits/8). */
			if (n > random_state.entropy_count / 8)
				n = random_state.entropy_count / 8;
			rndstats.rnd_reads++;
#ifdef	RNDEBUG
			if (rnd_debug & RD_OUTPUT)
				printf("rnd: %u possible output\n", n);
#endif
			/* FALLTHROUGH: srandom output comes from the
			 * same pool extraction as urandom. */
		case RND_URND:
			get_random_bytes((char *)buf, n);
#ifdef	RNDEBUG
			if (rnd_debug & RD_OUTPUT)
				printf("rnd: %u bytes for output\n", n);
#endif
			break;
		case RND_ARND_OLD:
		case RND_ARND:
			arc4random_buf(buf, n);
			break;
		default:
			ret = ENXIO;
		}
		if (n != 0 && ret == 0) {
			ret = uiomove((caddr_t)buf, n, uio);
			/* Yield between chunks on long reads. */
			if (!ret && uio->uio_resid > 0)
				yield();
		}
	}

	free(buf, M_TEMP);
	return ret;
}
1069 
1070 int
1071 randompoll(dev_t dev, int events, struct proc *p)
1072 {
1073 	int revents;
1074 
1075 	revents = events & (POLLOUT | POLLWRNORM);	/* always writable */
1076 	if (events & (POLLIN | POLLRDNORM)) {
1077 		if (minor(dev) == RND_SRND && random_state.entropy_count <= 0)
1078 			selrecord(p, &rnd_rsel);
1079 		else
1080 			revents |= events & (POLLIN | POLLRDNORM);
1081 	}
1082 
1083 	return (revents);
1084 }
1085 
1086 int
1087 randomkqfilter(dev_t dev, struct knote *kn)
1088 {
1089 	struct klist *klist;
1090 
1091 	switch (kn->kn_filter) {
1092 	case EVFILT_READ:
1093 		klist = &rnd_rsel.si_note;
1094 		kn->kn_fop = &rndread_filtops;
1095 		break;
1096 	case EVFILT_WRITE:
1097 		klist = &rnd_wsel.si_note;
1098 		kn->kn_fop = &rndwrite_filtops;
1099 		break;
1100 	default:
1101 		return (1);
1102 	}
1103 	kn->kn_hook = (void *)&random_state;
1104 
1105 	mtx_enter(&rndlock);
1106 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1107 	mtx_leave(&rndlock);
1108 
1109 	return (0);
1110 }
1111 
/*
 * kqueue detach for read knotes: unhook from the read note list.
 * rndlock serializes against randomkqfilter() insertions.
 */
void
filt_rndrdetach(struct knote *kn)
{
	mtx_enter(&rndlock);
	SLIST_REMOVE(&rnd_rsel.si_note, kn, knote, kn_selnext);
	mtx_leave(&rndlock);
}
1119 
1120 int
1121 filt_rndread(struct knote *kn, long hint)
1122 {
1123 	struct random_bucket *rs = (struct random_bucket *)kn->kn_hook;
1124 
1125 	kn->kn_data = (int)rs->entropy_count;
1126 	return rs->entropy_count > 0;
1127 }
1128 
/*
 * kqueue detach for write knotes: unhook from the write note list.
 * rndlock serializes against randomkqfilter() insertions.
 */
void
filt_rndwdetach(struct knote *kn)
{
	mtx_enter(&rndlock);
	SLIST_REMOVE(&rnd_wsel.si_note, kn, knote, kn_selnext);
	mtx_leave(&rndlock);
}
1136 
/*
 * Write filter: the random devices accept writes at any time, so the
 * knote is always active.
 */
int
filt_rndwrite(struct knote *kn, long hint)
{
	return (1);
}
1142 
/*
 * Mix user-supplied data into the entropy pool.  Data is copied in
 * chunks of up to POOLBYTES, zero-padded to a 32-bit word boundary,
 * and stirred in via add_entropy_words().  A successful write to one
 * of the arandom minors also forces the arc4 generator to rekey on
 * its next use.  (NOTE(review): whether add_entropy_words() credits
 * entropy for written data is not visible here -- confirm.)
 */
int
randomwrite(dev_t dev, struct uio *uio, int flags)
{
	int		ret = 0;
	u_int32_t	*buf;

	/* The raw hardware-RNG minor is not writable. */
	if (minor(dev) == RND_RND)
		return ENXIO;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (!ret && uio->uio_resid > 0) {
		u_short	n = min(POOLBYTES, uio->uio_resid);

		ret = uiomove((caddr_t)buf, n, uio);
		if (!ret) {
			/* Zero-pad a partial trailing word before
			 * stirring whole 32-bit words into the pool. */
			while (n % sizeof(u_int32_t))
				((u_int8_t *) buf)[n++] = 0;
			add_entropy_words(buf, n / 4);
		}
	}

	/* Force an arc4 rekey so the new data reaches the stream. */
	if ((minor(dev) == RND_ARND || minor(dev) == RND_ARND_OLD) &&
	    !ret)
		arc4random_initialized = 0;

	free(buf, M_TEMP);
	return ret;
}
1175 
/*
 * ioctl(2) handler for the random devices.  Entropy-count queries are
 * unprivileged; everything that mutates state requires superuser
 * (suser).  Timing of the ioctl itself is harvested as randomness on
 * entry and exit.
 */
int
randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	int	ret = 0;
	u_int	cnt;

	/* Harvest call-timing jitter. */
	add_timer_randomness((u_long)p ^ (u_long)data ^ cmd);

	switch (cmd) {
	case FIOASYNC:
		/* rnd has no async flag in softc so this is really a no-op. */
		break;

	case FIONBIO:
		/* Handled in the upper FS layer. */
		break;

	case RNDGETENTCNT:
		/* Report the current entropy estimate (bits). */
		mtx_enter(&rndlock);
		*(u_int *)data = random_state.entropy_count;
		mtx_leave(&rndlock);
		break;
	case RNDADDTOENTCNT:
		/* Root only: bump the estimate, clamped to pool size. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else {
			cnt = *(u_int *)data;
			mtx_enter(&rndlock);
			random_state.entropy_count += cnt;
			if (random_state.entropy_count > POOLBITS)
				random_state.entropy_count = POOLBITS;
			mtx_leave(&rndlock);
		}
		break;
	case RNDZAPENTCNT:
		/* Root only: discard the entropy estimate. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else {
			mtx_enter(&rndlock);
			random_state.entropy_count = 0;
			mtx_leave(&rndlock);
		}
		break;
	case RNDSTIRARC4:
		/* Root only: force an arc4 rekey, but only when at
		 * least 64 bits of entropy are estimated. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else if (random_state.entropy_count < 64)
			ret = EAGAIN;
		else {
			mtx_enter(&rndlock);
			arc4random_initialized = 0;
			mtx_leave(&rndlock);
		}
		break;
	case RNDCLRSTATS:
		/* Root only: reset the statistics counters. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else {
			mtx_enter(&rndlock);
			bzero(&rndstats, sizeof(rndstats));
			mtx_leave(&rndlock);
		}
		break;
	default:
		ret = ENOTTY;
	}

	add_timer_randomness((u_long)p ^ (u_long)data ^ cmd);
	return ret;
}
1246