xref: /openbsd-src/sys/dev/rnd.c (revision a28daedfc357b214be5c701aa8ba8adb29a7f1c2)
1 /*	$OpenBSD: rnd.c,v 1.99 2008/12/15 06:00:38 djm Exp $	*/
2 
3 /*
4  * rnd.c -- A strong random number generator
5  *
6  * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
7  * Copyright (c) 2008 Damien Miller.
8  *
9  * Version 1.89, last modified 19-Sep-99
10  *
11  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, and the entire permission notice in its entirety,
19  *    including the disclaimer of warranties.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. The name of the author may not be used to endorse or promote
24  *    products derived from this software without specific prior
25  *    written permission.
26  *
27  * ALTERNATIVELY, this product may be distributed under the terms of
28  * the GNU Public License, in which case the provisions of the GPL are
29  * required INSTEAD OF the above restrictions.  (This clause is
30  * necessary due to a potential bad interaction between the GPL and
31  * the restrictions contained in a BSD-style copyright.)
32  *
33  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
34  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
36  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
37  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
38  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
39  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43  * OF THE POSSIBILITY OF SUCH DAMAGE.
44  */
45 
46 /*
47  * (now, with legal B.S. out of the way.....)
48  *
49  * This routine gathers environmental noise from device drivers, etc.,
50  * and returns good random numbers, suitable for cryptographic use.
51  * Besides the obvious cryptographic uses, these numbers are also good
52  * for seeding TCP sequence numbers, and other places where it is
53  * desirable to have numbers which are not only random, but hard to
54  * predict by an attacker.
55  *
56  * Theory of operation
57  * ===================
58  *
59  * Computers are very predictable devices.  Hence it is extremely hard
60  * to produce truly random numbers on a computer --- as opposed to
61  * pseudo-random numbers, which can be easily generated by using an
62  * algorithm.  Unfortunately, it is very easy for attackers to guess
63  * the sequence of pseudo-random number generators, and for some
64  * applications this is not acceptable.  Instead, we must try to
65  * gather "environmental noise" from the computer's environment, which
66  * must be hard for outside attackers to observe and use to
67  * generate random numbers.  In a Unix environment, this is best done
68  * from inside the kernel.
69  *
70  * Sources of randomness from the environment include inter-keyboard
71  * timings, inter-interrupt timings from some interrupts, and other
72  * events which are both (a) non-deterministic and (b) hard for an
73  * outside observer to measure.  Randomness from these sources is
74  * added to the "entropy pool", which is mixed using a CRC-like function.
75  * This is not cryptographically strong, but it is adequate assuming
76  * the randomness is not chosen maliciously, and it is fast enough that
77  * the overhead of doing it on every interrupt is very reasonable.
78  * As random bytes are mixed into the entropy pool, the routines keep
79  * an *estimate* of how many bits of randomness have been stored into
80  * the random number generator's internal state.
81  *
82  * When random bytes are desired, they are obtained by taking the MD5
83  * hash of the content of the entropy pool.  The MD5 hash avoids
84  * exposing the internal state of the entropy pool.  It is believed to
85  * be computationally infeasible to derive any useful information
86  * about the input of MD5 from its output.  Even if it is possible to
87  * analyze MD5 in some clever way, as long as the amount of data
88  * returned from the generator is less than the inherent entropy in
89  * the pool, the output data is totally unpredictable.  For this
90  * reason, the routine decreases its internal estimate of how many
91  * bits of "true randomness" are contained in the entropy pool as it
92  * outputs random numbers.
93  *
94  * If this estimate goes to zero, the routine can still generate
95  * random numbers; however, an attacker may (at least in theory) be
96  * able to infer the future output of the generator from prior
97  * outputs.  This requires successful cryptanalysis of MD5, which is
98  * believed to be not feasible, but there is a remote possibility.
99  * Nonetheless, these numbers should be useful for the vast majority
100  * of purposes.
101  *
102  * Exported interfaces ---- output
103  * ===============================
104  *
105  * There are three exported interfaces.
106  * The first set are designed to be used from within the kernel:
107  *
108  *	void get_random_bytes(void *buf, int nbytes);
109  *
110  * This interface will return the requested number of random bytes,
111  * and place it in the requested buffer.
112  *
113  * Two other interfaces are two character devices /dev/random and
114  * /dev/urandom.  /dev/random is suitable for use when very high
115  * quality randomness is desired (for example, for key generation or
116  * one-time pads), as it will only return a maximum of the number of
117  * bits of randomness (as estimated by the random number generator)
118  * contained in the entropy pool.
119  *
120  * The /dev/urandom device does not have this limit, and will return
 * as many bytes as were requested.  As more and more random bytes are
122  * requested without giving time for the entropy pool to recharge,
123  * this will result in random numbers that are merely cryptographically
124  * strong.  For many applications, however, this is acceptable.
125  *
126  * Exported interfaces ---- input
127  * ==============================
128  *
129  * The current exported interfaces for gathering environmental noise
130  * from the devices are:
131  *
132  *	void add_true_randomness(int data);
133  *	void add_timer_randomness(int data);
134  *	void add_mouse_randomness(int mouse_data);
135  *	void add_net_randomness(int isr);
136  *	void add_tty_randomness(int c);
137  *	void add_disk_randomness(int n);
138  *	void add_audio_randomness(int n);
139  *	void add_video_randomness(int n);
140  *
141  * add_true_randomness() uses true random number generators present
142  * on some cryptographic and system chipsets.  Entropy accounting
 * is not equitable; no timing is done, the supplied 32 bits of pure entropy
144  * are hashed into the pool plain and blindly, increasing the counter.
145  *
 * add_timer_randomness() uses the random driver's own timing,
147  * measuring extract_entropy() and rndioctl() execution times.
148  *
149  * add_mouse_randomness() uses the mouse interrupt timing, as well as
150  * the reported position of the mouse from the hardware.
151  *
152  * add_net_randomness() times the finishing time of net input.
153  *
154  * add_tty_randomness() uses the inter-keypress timing, as well as the
155  * character as random inputs into the entropy pool.
156  *
157  * add_disk_randomness() times the finishing time of disk requests as well
158  * as feeding both xfer size & time into the entropy pool.
159  *
160  * add_audio_randomness() times the finishing of audio codec dma
161  * requests for both recording and playback, apparently supplies quite
162  * a lot of entropy. I'd blame it on low resolution audio clock generators.
163  *
164  * All of these routines (except for add_true_randomness() of course)
165  * try to estimate how many bits of randomness are in a particular
166  * randomness source.  They do this by keeping track of the first and
167  * second order deltas of the event timings.
168  *
169  * Ensuring unpredictability at system startup
170  * ============================================
171  *
172  * When any operating system starts up, it will go through a sequence
173  * of actions that are fairly predictable by an adversary, especially
174  * if the start-up does not involve interaction with a human operator.
175  * This reduces the actual number of bits of unpredictability in the
176  * entropy pool below the value in entropy_count.  In order to
177  * counteract this effect, it helps to carry information in the
178  * entropy pool across shut-downs and start-ups.  To do this, put the
179  * following lines in appropriate script which is run during the boot
180  * sequence:
181  *
182  *	echo "Initializing random number generator..."
183  *	# Carry a random seed from start-up to start-up
184  *	# Load and then save 512 bytes, which is the size of the entropy pool
185  *	if [ -f /etc/random-seed ]; then
186  *		cat /etc/random-seed >/dev/urandom
187  *	fi
188  *	dd if=/dev/urandom of=/etc/random-seed count=1
189  *
190  * and the following lines in appropriate script which is run when
191  * the system is shutting down:
192  *
193  *	# Carry a random seed from shut-down to start-up
194  *	# Save 512 bytes, which is the size of the entropy pool
195  *	echo "Saving random seed..."
196  *	dd if=/dev/urandom of=/etc/random-seed count=1
197  *
198  * For example, on OpenBSD systems, the appropriate scripts are
199  * usually /etc/rc.local and /etc/rc.shutdown, respectively.
200  *
201  * Effectively, these commands cause the contents of the entropy pool
202  * to be saved at shutdown time and reloaded into the entropy pool at
203  * start-up.  (The 'dd' in the addition to the bootup script is to
204  * make sure that /etc/random-seed is different for every start-up,
205  * even if the system crashes without executing rc.shutdown) Even with
206  * complete knowledge of the start-up activities, predicting the state
207  * of the entropy pool requires knowledge of the previous history of
208  * the system.
209  *
210  * Configuring the random(4) driver under OpenBSD
211  * ==============================================
212  *
213  * The special files for the random(4) driver should have been created
214  * during the installation process.  However, if your system does not have
215  * /dev/random and /dev/[s|u|p|a]random created already, they can be created
216  * by using the MAKEDEV(8) script in /dev:
217  *
218  *	/dev/MAKEDEV random
219  *
220  * Check MAKEDEV for information about major and minor numbers.
221  *
222  * Acknowledgements:
223  * =================
224  *
225  * Ideas for constructing this random number generator were derived
226  * from Pretty Good Privacy's random number generator, and from private
227  * discussions with Phil Karn.  Colin Plumb provided a faster random
228  * number generator, which speeds up the mixing function of the entropy
229  * pool, taken from PGPfone.  Dale Worley has also contributed many
230  * useful ideas and suggestions to improve this driver.
231  *
232  * Any flaws in the design are solely my responsibility, and should
233  * not be attributed to the Phil, Colin, or any of the authors of PGP.
234  *
235  * Further background information on this topic may be obtained from
236  * RFC 1750, "Randomness Recommendations for Security", by Donald
237  * Eastlake, Steve Crocker, and Jeff Schiller.
238  */
239 
240 #include <sys/param.h>
241 #include <sys/endian.h>
242 #include <sys/systm.h>
243 #include <sys/conf.h>
244 #include <sys/disk.h>
245 #include <sys/limits.h>
246 #include <sys/ioctl.h>
247 #include <sys/malloc.h>
248 #include <sys/fcntl.h>
249 #include <sys/vnode.h>
250 #include <sys/sysctl.h>
251 #include <sys/timeout.h>
252 #include <sys/poll.h>
253 #include <sys/mutex.h>
254 #include <sys/msgbuf.h>
255 
256 #include <crypto/md5.h>
257 #include <crypto/arc4.h>
258 
259 #include <dev/rndvar.h>
260 #include <dev/rndioctl.h>
261 
#ifdef	RNDEBUG
/* Debug printf categories; OR the RD_* bits below into rnd_debug. */
int	rnd_debug = 0x0000;
#define	RD_INPUT	0x000f	/* input data */
#define	RD_OUTPUT	0x00f0	/* output data */
#define	RD_WAIT		0x0100	/* sleep/wakeup for good data */
#endif
268 
269 /*
270  * Master random number pool functions
271  * -----------------------------------
272  */
273 
274 /*
275  * For the purposes of better mixing, we use the CRC-32 polynomial as
276  * well to make a twisted Generalized Feedback Shift Register
277  *
278  * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
279  * Transactions on Modeling and Computer Simulation 2(3):179-194.
280  * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
 * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
282  *
283  * Thanks to Colin Plumb for suggesting this.
284  *
285  * We have not analyzed the resultant polynomial to prove it primitive;
286  * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
287  * of a random large-degree polynomial over GF(2) are more than large enough
288  * that periodicity is not a concern.
289  *
290  * The input hash is much less sensitive than the output hash.  All
291  * we want from it is to be a good non-cryptographic hash -
292  * i.e. to not produce collisions when fed "random" data of the sort
293  * we expect to see.  As long as the pool state differs for different
294  * inputs, we have preserved the input entropy and done a good job.
295  * The fact that an intelligent attacker can construct inputs that
296  * will produce controlled alterations to the pool's state is not
297  * important because we don't consider such inputs to contribute any
298  * randomness.  The only property we need with respect to them is that
299  * the attacker can't increase his/her knowledge of the pool's state.
300  * Since all additions are reversible (knowing the final state and the
301  * input, you can reconstruct the initial state), if an attacker has
302  * any uncertainty about the initial state, he/she can only shuffle
303  * that uncertainty about, but never cause any collisions (which would
304  * decrease the uncertainty).
305  *
306  * The chosen system lets the state of the pool be (essentially) the input
307  * modulo the generator polynomial.  Now, for random primitive polynomials,
308  * this is a universal class of hash functions, meaning that the chance
309  * of a collision is limited by the attacker's knowledge of the generator
310  * polynomial, so if it is chosen at random, an attacker can never force
311  * a collision.  Here, we use a fixed polynomial, but we *can* assume that
312  * ###--> it is unknown to the processes generating the input entropy. <-###
313  * Because of this important property, this is a good, collision-resistant
314  * hash; hash collisions will occur no more often than chance.
315  */
316 
/*
 * Stirring polynomials over GF(2) for various pool sizes. Used in
 * add_entropy_words() below.
 *
 * The polynomial terms are chosen to be evenly spaced (minimum RMS
 * distance from evenly spaced), except for the last tap, which is 1 to
 * get the twisting happening as fast as possible.
 *
 * The resultant polynomial is:
 *   2^POOLWORDS + 2^POOL_TAP1 + 2^POOL_TAP2 + 2^POOL_TAP3 + 2^POOL_TAP4 + 1
 */
#define POOLBITS	(POOLWORDS*32)	/* pool size in bits */
#define POOLBYTES	(POOLWORDS*4)	/* pool size in bytes */
#define POOLMASK	(POOLWORDS - 1)	/* index mask; POOLWORDS is a power of 2 */
#if POOLWORDS == 2048
#define	POOL_TAP1	1638
#define	POOL_TAP2	1231
#define	POOL_TAP3	819
#define	POOL_TAP4	411
#elif POOLWORDS == 1024	/* also (819, 616, 410, 207, 2) */
#define	POOL_TAP1	817
#define	POOL_TAP2	615
#define	POOL_TAP3	412
#define	POOL_TAP4	204
#elif POOLWORDS == 512	/* also (409,307,206,102,2), (409,309,205,103,2) */
#define	POOL_TAP1	411
#define	POOL_TAP2	308
#define	POOL_TAP3	208
#define	POOL_TAP4	104
#elif POOLWORDS == 256
#define	POOL_TAP1	205
#define	POOL_TAP2	155
#define	POOL_TAP3	101
#define	POOL_TAP4	52
#elif POOLWORDS == 128	/* also (103, 78, 51, 27, 2) */
#define	POOL_TAP1	103
#define	POOL_TAP2	76
#define	POOL_TAP3	51
#define	POOL_TAP4	25
#elif POOLWORDS == 64
#define	POOL_TAP1	52
#define	POOL_TAP2	39
#define	POOL_TAP3	26
#define	POOL_TAP4	14
#elif POOLWORDS == 32
#define	POOL_TAP1	26
#define	POOL_TAP2	20
#define	POOL_TAP3	14
#define	POOL_TAP4	7
#else
#error No primitive polynomial available for chosen POOLWORDS
#endif

static void dequeue_randomness(void *);

/* Master kernel random number pool. */
struct random_bucket {
	u_int	add_ptr;	/* next pool slot to write (moves downward) */
	u_int	entropy_count;	/* estimated bits of entropy in the pool */
	u_char	input_rotate;	/* current input rotation amount, 0..31 */
	u_int32_t pool[POOLWORDS];	/* the entropy pool itself */
	u_int	asleep;		/* number of sleepers waiting for entropy */
	u_int	tmo;		/* nonzero while a dequeue timeout is pending */
};
struct random_bucket random_state;
struct mutex rndlock;		/* protects pool, event queue and arc4 state */
383 
384 /*
385  * This function adds a byte into the entropy pool.  It does not
386  * update the entropy estimate.  The caller must do this if appropriate.
387  *
388  * The pool is stirred with a polynomial of degree POOLWORDS over GF(2);
389  * see POOL_TAP[1-4] above
390  *
391  * Rotate the input word by a changing number of bits, to help assure
392  * that all bits in the entropy get toggled.  Otherwise, if the pool
393  * is consistently feed small numbers (such as keyboard scan codes)
394  * then the upper bits of the entropy pool will frequently remain
395  * untouched.
396  */
397 static void
398 add_entropy_words(const u_int32_t *buf, u_int n)
399 {
400 	/* derived from IEEE 802.3 CRC-32 */
401 	static const u_int32_t twist_table[8] = {
402 		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
403 		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
404 	};
405 
406 	for (; n--; buf++) {
407 		u_int32_t w = (*buf << random_state.input_rotate) |
408 		    (*buf >> (32 - random_state.input_rotate));
409 		u_int i = random_state.add_ptr =
410 		    (random_state.add_ptr - 1) & POOLMASK;
411 		/*
412 		 * Normally, we add 7 bits of rotation to the pool.
413 		 * At the beginning of the pool, add an extra 7 bits
414 		 * rotation, so that successive passes spread the
415 		 * input bits across the pool evenly.
416 		 */
417 		random_state.input_rotate =
418 		    (random_state.input_rotate + (i ? 7 : 14)) & 31;
419 
420 		/* XOR pool contents corresponding to polynomial terms */
421 		w ^= random_state.pool[(i + POOL_TAP1) & POOLMASK] ^
422 		     random_state.pool[(i + POOL_TAP2) & POOLMASK] ^
423 		     random_state.pool[(i + POOL_TAP3) & POOLMASK] ^
424 		     random_state.pool[(i + POOL_TAP4) & POOLMASK] ^
425 		     random_state.pool[(i + 1) & POOLMASK] ^
426 		     random_state.pool[i]; /* + 2^POOLWORDS */
427 
428 		random_state.pool[i] = (w >> 3) ^ twist_table[w & 7];
429 	}
430 }
431 
/*
 * This function extracts randomness from the entropy pool, and
 * returns it in a buffer.  This function computes how many remaining
 * bits of entropy are left in the pool, but it does not restrict the
 * number of bytes that are actually obtained.
 */
static void
extract_entropy(u_int8_t *buf, int nbytes)
{
	struct random_bucket *rs = &random_state;
	u_char buffer[16];
	MD5_CTX tmp;
	u_int i;

	/* the extraction itself is an entropy-bearing timing event */
	add_timer_randomness(nbytes);

	while (nbytes) {
		/* serve at most half an MD5 digest (8 bytes) per round */
		if (nbytes < sizeof(buffer) / 2)
			i = nbytes;
		else
			i = sizeof(buffer) / 2;

		/* Hash the pool to get the output */
		MD5Init(&tmp);
		mtx_enter(&rndlock);
		MD5Update(&tmp, (u_int8_t*)rs->pool, sizeof(rs->pool));
		/* debit the entropy estimate, bottoming out at zero */
		if (rs->entropy_count / 8 > i)
			rs->entropy_count -= i * 8;
		else
			rs->entropy_count = 0;
		mtx_leave(&rndlock);
		MD5Final(buffer, &tmp);

		/*
		 * In case the hash function has some recognizable
		 * output pattern, we fold it in half.
		 */
		buffer[0] ^= buffer[15];
		buffer[1] ^= buffer[14];
		buffer[2] ^= buffer[13];
		buffer[3] ^= buffer[12];
		buffer[4] ^= buffer[11];
		buffer[5] ^= buffer[10];
		buffer[6] ^= buffer[ 9];
		buffer[7] ^= buffer[ 8];

		/* Copy data to destination buffer */
		bcopy(buffer, buf, i);
		nbytes -= i;
		buf += i;

		/* Modify pool so next hash will produce different results */
		add_timer_randomness(nbytes);
		dequeue_randomness(&random_state);
	}

	/* Wipe data from memory */
	bzero(&tmp, sizeof(tmp));
	bzero(buffer, sizeof(buffer));
}
492 
493 /*
494  * Kernel-side entropy crediting API and handling of entropy-bearing events
495  * ------------------------------------------------------------------------
496  */
497 
498 /* pIII/333 reported to have some drops w/ these numbers */
499 #define QEVLEN (1024 / sizeof(struct rand_event))
500 #define QEVSLOW (QEVLEN * 3 / 4) /* yet another 0.75 for 60-minutes hour /-; */
501 #define QEVSBITS 10
502 
503 /* There is one of these per entropy source */
504 struct timer_rand_state {
505 	u_int	last_time;
506 	u_int	last_delta;
507 	u_int	last_delta2;
508 	u_int	dont_count_entropy : 1;
509 	u_int	max_entropy : 1;
510 };
511 
512 struct rand_event {
513 	struct timer_rand_state *re_state;
514 	u_int re_nbits;
515 	u_int re_time;
516 	u_int re_val;
517 };
518 
519 struct timer_rand_state rnd_states[RND_SRC_NUM];
520 struct rand_event rnd_event_space[QEVLEN];
521 struct rand_event *rnd_event_head = rnd_event_space;
522 struct rand_event *rnd_event_tail = rnd_event_space;
523 struct timeout rnd_timeout;
524 struct selinfo rnd_rsel;
525 struct rndstats rndstats;
526 int rnd_attached;
527 
528 /* must be called at a proper spl, returns ptr to the next event */
529 static __inline struct rand_event *
530 rnd_get(void)
531 {
532 	struct rand_event *p = rnd_event_tail;
533 
534 	if (p == rnd_event_head)
535 		return NULL;
536 
537 	if (p + 1 >= &rnd_event_space[QEVLEN])
538 		rnd_event_tail = rnd_event_space;
539 	else
540 		rnd_event_tail++;
541 
542 	return p;
543 }
544 
545 /* must be called at a proper spl, returns next available item */
546 static __inline struct rand_event *
547 rnd_put(void)
548 {
549 	struct rand_event *p = rnd_event_head + 1;
550 
551 	if (p >= &rnd_event_space[QEVLEN])
552 		p = rnd_event_space;
553 
554 	if (p == rnd_event_tail)
555 		return NULL;
556 
557 	return rnd_event_head = p;
558 }
559 
560 /* must be called at a proper spl, returns number of items in the queue */
561 static __inline int
562 rnd_qlen(void)
563 {
564 	int len = rnd_event_head - rnd_event_tail;
565 	return (len < 0)? -len : len;
566 }
567 
568 /*
569  * This function adds entropy to the entropy pool by using timing
570  * delays.  It uses the timer_rand_state structure to make an estimate
571  * of how many bits of entropy this call has added to the pool.
572  *
573  * The number "val" is also added to the pool - it should somehow describe
574  * the type of event which just happened.  Currently the values of 0-255
575  * are for keyboard scan codes, 256 and upwards - for interrupts.
576  * On the i386, this is assumed to be at most 16 bits, and the high bits
577  * are used for a high-resolution timer.
578  */
579 void
580 enqueue_randomness(int state, int val)
581 {
582 	struct timer_rand_state *p;
583 	struct rand_event *rep;
584 	struct timespec	tv;
585 	u_int	time, nbits;
586 
587 #ifdef DIAGNOSTIC
588 	if (state < 0 || state >= RND_SRC_NUM)
589 		return;
590 #endif
591 
592 	p = &rnd_states[state];
593 	val += state << 13;
594 
595 	if (!rnd_attached) {
596 		if ((rep = rnd_put()) == NULL) {
597 			rndstats.rnd_drops++;
598 			return;
599 		}
600 
601 		rep->re_state = &rnd_states[RND_SRC_TIMER];
602 		rep->re_nbits = 0;
603 		rep->re_time = 0;
604 		rep->re_time = val;
605 		return;
606 	}
607 
608 	nanotime(&tv);
609 	time = (tv.tv_nsec >> 10) + (tv.tv_sec << 20);
610 	nbits = 0;
611 
612 	/*
613 	 * Calculate the number of bits of randomness that we probably
614 	 * added.  We take into account the first and second order
615 	 * deltas in order to make our estimate.
616 	 */
617 	if (!p->dont_count_entropy) {
618 		int	delta, delta2, delta3;
619 		delta  = time   - p->last_time;
620 		delta2 = delta  - p->last_delta;
621 		delta3 = delta2 - p->last_delta2;
622 
623 		if (delta < 0) delta = -delta;
624 		if (delta2 < 0) delta2 = -delta2;
625 		if (delta3 < 0) delta3 = -delta3;
626 		if (delta > delta2) delta = delta2;
627 		if (delta > delta3) delta = delta3;
628 		delta3 = delta >>= 1;
629 		/*
630 		 * delta &= 0xfff;
631 		 * we don't do it since our time sheet is different from linux
632 		 */
633 
634 		if (delta & 0xffff0000) {
635 			nbits = 16;
636 			delta >>= 16;
637 		}
638 		if (delta & 0xff00) {
639 			nbits += 8;
640 			delta >>= 8;
641 		}
642 		if (delta & 0xf0) {
643 			nbits += 4;
644 			delta >>= 4;
645 		}
646 		if (delta & 0xc) {
647 			nbits += 2;
648 			delta >>= 2;
649 		}
650 		if (delta & 2) {
651 			nbits += 1;
652 			delta >>= 1;
653 		}
654 		if (delta & 1)
655 			nbits++;
656 
657 		/*
658 		 * the logic is to drop low-entropy entries,
659 		 * in hope for dequeuing to be more randomfull
660 		 */
661 		if (rnd_qlen() > QEVSLOW && nbits < QEVSBITS) {
662 			rndstats.rnd_drople++;
663 			return;
664 		}
665 		p->last_time = time;
666 		p->last_delta  = delta3;
667 		p->last_delta2 = delta2;
668 	} else if (p->max_entropy)
669 		nbits = 8 * sizeof(val) - 1;
670 
671 	mtx_enter(&rndlock);
672 	if ((rep = rnd_put()) == NULL) {
673 		rndstats.rnd_drops++;
674 		mtx_leave(&rndlock);
675 		return;
676 	}
677 
678 	rep->re_state = p;
679 	rep->re_nbits = nbits;
680 	rep->re_time = tv.tv_nsec ^ (tv.tv_sec << 20);
681 	rep->re_val = val;
682 
683 	rndstats.rnd_enqs++;
684 	rndstats.rnd_ed[nbits]++;
685 	rndstats.rnd_sc[state]++;
686 	rndstats.rnd_sb[state] += nbits;
687 
688 	if (rnd_qlen() > QEVSLOW/2 && !random_state.tmo) {
689 		random_state.tmo++;
690 		timeout_add(&rnd_timeout, 1);
691 	}
692 	mtx_leave(&rndlock);
693 }
694 
/*
 * Drain the event queue, mixing each event into the pool and crediting
 * its estimated entropy.  Runs from rnd_timeout and is also called
 * directly by extract_entropy().  'v' is the struct random_bucket to
 * credit (always &random_state here).
 */
static void
dequeue_randomness(void *v)
{
	struct random_bucket *rs = v;
	struct rand_event *rep;
	u_int32_t buf[2];
	u_int nbits;

	timeout_del(&rnd_timeout);
	rndstats.rnd_deqs++;

	mtx_enter(&rndlock);
	while ((rep = rnd_get())) {

		/* copy the event out while still holding the lock */
		buf[0] = rep->re_time;
		buf[1] = rep->re_val;
		nbits = rep->re_nbits;
		/* drop the lock around the (relatively slow) pool stir */
		mtx_leave(&rndlock);

		add_entropy_words(buf, 2);

		rndstats.rnd_total += nbits;
		rs->entropy_count += nbits;
		/* the estimate can never exceed the pool size */
		if (rs->entropy_count > POOLBITS)
			rs->entropy_count = POOLBITS;

		/* wake one reader sleeping for entropy, and poll/kq waiters */
		if (rs->asleep && rs->entropy_count > 8) {
#ifdef	RNDEBUG
			if (rnd_debug & RD_WAIT)
				printf("rnd: wakeup[%u]{%u}\n",
				    rs->asleep,
				    rs->entropy_count);
#endif
			rs->asleep--;
			wakeup((void *)&rs->asleep);
			selwakeup(&rnd_rsel);
			KNOTE(&rnd_rsel.si_note, 0);
		}

		mtx_enter(&rndlock);
	}

	/* queue empty: allow enqueue_randomness() to rearm the timeout */
	rs->tmo = 0;
	mtx_leave(&rndlock);
}
740 
/*
 * Exported kernel CPRNG API: arc4random() and friends
 * ---------------------------------------------------
 */

/*
 * Maximum number of bytes to serve directly from the main arc4random
 * pool. Larger requests are served from discrete arc4 instances keyed
 * from the main pool.
 */
#define ARC4_MAIN_MAX_BYTES	2048

/*
 * Key size (in bytes) for arc4 instances setup to serve requests larger
 * than ARC4_MAIN_MAX_BYTES.
 */
#define ARC4_SUB_KEY_BYTES	(256 / 8)

struct timeout arc4_timeout;	/* periodically marks the arc4 state stale */
struct rc4_ctx arc4random_state;	/* main arc4 state, under rndlock */
int arc4random_initialized;	/* zero when a (re)stir is needed */
u_long arc4random_count = 0;	/* bytes served since the last stir */
763 
764 /*
765  * This function returns some number of good random numbers but is quite
766  * slow. Please use arc4random_buf() instead unless very high quality
767  * randomness is required.
768  * XXX: rename this
769  */
770 void
771 get_random_bytes(void *buf, size_t nbytes)
772 {
773 	extract_entropy((u_int8_t *) buf, nbytes);
774 	rndstats.rnd_used += nbytes * 8;
775 }
776 
777 static void
778 arc4_stir(void)
779 {
780 	u_int8_t buf[256];
781 	int len;
782 
783 	nanotime((struct timespec *) buf);
784 	len = sizeof(buf) - sizeof(struct timespec);
785 	get_random_bytes(buf + sizeof (struct timespec), len);
786 	len += sizeof(struct timespec);
787 
788 	mtx_enter(&rndlock);
789 	if (rndstats.arc4_nstirs > 0)
790 		rc4_crypt(&arc4random_state, buf, buf, sizeof(buf));
791 
792 	rc4_keysetup(&arc4random_state, buf, sizeof(buf));
793 	arc4random_count = 0;
794 	rndstats.arc4_stirs += len;
795 	rndstats.arc4_nstirs++;
796 
797 	/*
798 	 * Throw away the first N words of output, as suggested in the
799 	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
800 	 * by Fluher, Mantin, and Shamir.  (N = 256 in our case.)
801 	 */
802 	rc4_skip(&arc4random_state, 256 * 4);
803 	mtx_leave(&rndlock);
804 }

/*
 * called by timeout to mark arc4 for stirring,
 * actual stirring happens on any access attempt.
 */
static void
arc4_reinit(void *v)
{
	/* next arc4maybeinit() call will re-stir from the pool */
	arc4random_initialized = 0;
}
815 
816 static void
817 arc4maybeinit(void)
818 {
819 
820 	if (!arc4random_initialized) {
821 #ifdef DIAGNOSTIC
822 		if (!rnd_attached)
823 			panic("arc4maybeinit: premature");
824 #endif
825 		arc4random_initialized++;
826 		arc4_stir();
827 		/* 10 minutes, per dm@'s suggestion */
828 		timeout_add_sec(&arc4_timeout, 10 * 60);
829 	}
830 }

/*
 * Attach-time initialization: set up timeouts, per-source flags, the
 * lock, and seed the pool with the kernel message buffer contents.
 */
void
randomattach(void)
{
	if (rnd_attached) {
#ifdef RNDEBUG
		printf("random: second attach\n");
#endif
		return;
	}

	timeout_set(&rnd_timeout, dequeue_randomness, &random_state);
	timeout_set(&arc4_timeout, arc4_reinit, NULL);

	random_state.add_ptr = 0;
	random_state.entropy_count = 0;
	/* the driver's own timing events never credit entropy */
	rnd_states[RND_SRC_TIMER].dont_count_entropy = 1;
	/* hardware RNG sources skip timing and credit maximum entropy */
	rnd_states[RND_SRC_TRUE].dont_count_entropy = 1;
	rnd_states[RND_SRC_TRUE].max_entropy = 1;

	mtx_init(&rndlock, IPL_HIGH);
	arc4_reinit(NULL);

	/* seed the pool with whatever is in the kernel message buffer */
	if (msgbufp && msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
	rnd_attached = 1;
}
859 
860 /* Return one word of randomness from an RC4 generator */
861 u_int32_t
862 arc4random(void)
863 {
864 	u_int32_t ret;
865 
866 	arc4maybeinit();
867 	mtx_enter(&rndlock);
868 	rc4_getbytes(&arc4random_state, (u_char*)&ret, sizeof(ret));
869 	rndstats.arc4_reads += sizeof(ret);
870 	arc4random_count += sizeof(ret);
871 	mtx_leave(&rndlock);
872 	return ret;
873 }
874 
875 /*
876  * Return a "large" buffer of randomness using an independantly-keyed RC4
877  * generator.
878  */
879 static void
880 arc4random_buf_large(void *buf, size_t n)
881 {
882 	u_char lbuf[ARC4_SUB_KEY_BYTES];
883 	struct rc4_ctx lctx;
884 
885 	mtx_enter(&rndlock);
886 	rc4_getbytes(&arc4random_state, lbuf, sizeof(lbuf));
887 	rndstats.arc4_reads += n;
888 	arc4random_count += sizeof(lbuf);
889 	mtx_leave(&rndlock);
890 
891 	rc4_keysetup(&lctx, lbuf, sizeof(lbuf));
892 	rc4_skip(&lctx, 256 * 4);
893 	rc4_getbytes(&lctx, (u_char*)buf, n);
894 	bzero(lbuf, sizeof(lbuf));
895 	bzero(&lctx, sizeof(lctx));
896 }
897 
898 /*
899  * Fill a buffer of arbitrary length with RC4-derived randomness.
900  */
901 void
902 arc4random_buf(void *buf, size_t n)
903 {
904 	arc4maybeinit();
905 
906 	/* Satisfy large requests via an independent ARC4 instance */
907 	if (n > ARC4_MAIN_MAX_BYTES) {
908 		arc4random_buf_large(buf, n);
909 		return;
910 	}
911 
912 	mtx_enter(&rndlock);
913 	rc4_getbytes(&arc4random_state, (u_char*)buf, n);
914 	rndstats.arc4_reads += n;
915 	arc4random_count += n;
916 	mtx_leave(&rndlock);
917 }
918 
919 /*
920  * Calculate a uniformly distributed random number less than upper_bound
921  * avoiding "modulo bias".
922  *
923  * Uniformity is achieved by generating new random numbers until the one
924  * returned is outside the range [0, 2**32 % upper_bound).  This
925  * guarantees the selected random number will be inside
926  * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
927  * after reduction modulo upper_bound.
928  */
929 u_int32_t
930 arc4random_uniform(u_int32_t upper_bound)
931 {
932 	u_int32_t r, min;
933 
934 	if (upper_bound < 2)
935 		return 0;
936 
937 #if (ULONG_MAX > 0xffffffffUL)
938 	min = 0x100000000UL % upper_bound;
939 #else
940 	/* Calculate (2**32 % upper_bound) avoiding 64-bit math */
941 	if (upper_bound > 0x80000000)
942 		min = 1 + ~upper_bound;		/* 2**32 - upper_bound */
943 	else {
944 		/* (2**32 - x) % x == 2**32 % x when x <= 2**31 */
945 		min = ((0xffffffff - upper_bound) + 1) % upper_bound;
946 	}
947 #endif
948 
949 	/*
950 	 * This could theoretically loop forever but each retry has
951 	 * p > 0.5 (worst case, usually far better) of selecting a
952 	 * number inside the range we need, so it should rarely need
953 	 * to re-roll.
954 	 */
955 	for (;;) {
956 		r = arc4random();
957 		if (r >= min)
958 			break;
959 	}
960 
961 	return r % upper_bound;
962 }
963 
964 /*
965  * random, srandom, urandom, arandom char devices
966  * -------------------------------------------------------
967  */
968 
/* kqueue filter hooks for the read side of the random devices. */
void filt_rndrdetach(struct knote *kn);
int filt_rndread(struct knote *kn, long hint);

struct filterops rndread_filtops =
	{ 1, NULL, filt_rndrdetach, filt_rndread};

/* kqueue filter hooks for the write side (always writable). */
void filt_rndwdetach(struct knote *kn);
int filt_rndwrite(struct knote *kn, long hint);

struct filterops rndwrite_filtops =
	{ 1, NULL, filt_rndwdetach, filt_rndwrite};

/* select/poll bookkeeping for writers. */
struct selinfo rnd_wsel;
982 
983 int
984 randomopen(dev_t dev, int flag, int mode, struct proc *p)
985 {
986 	return (minor (dev) < RND_NODEV) ? 0 : ENXIO;
987 }
988 
/* Close a random device node; no per-open state to tear down. */
int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
994 
/*
 * Read from a random device.  Behavior depends on the minor:
 *   RND_RND      - no hardware RNG attached, always EIO.
 *   RND_SRND     - "strong" random: blocks (unless IO_NDELAY) until
 *                  the pool holds enough entropy, then falls through
 *                  to pool output, capped at entropy_count/8 bytes
 *                  (entropy_count is in bits).
 *   RND_URND     - pool output without blocking.
 *   RND_ARND*    - ARC4 stream output.
 * Copies out in POOLBYTES-sized chunks, yielding between chunks.
 */
int
randomread(dev_t dev, struct uio *uio, int ioflag)
{
	int		ret = 0;
	u_int32_t 	*buf;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (!ret && uio->uio_resid > 0) {
		int	n = min(POOLBYTES, uio->uio_resid);

		switch(minor(dev)) {
		case RND_RND:
			ret = EIO;	/* no chip -- error */
			break;
		case RND_SRND:
			/* Need at least 16 bytes (128 bits) of entropy. */
			if (random_state.entropy_count < 16 * 8) {
				if (ioflag & IO_NDELAY) {
					ret = EWOULDBLOCK;
					break;
				}
#ifdef	RNDEBUG
				if (rnd_debug & RD_WAIT)
					printf("rnd: sleep[%u]\n",
					    random_state.asleep);
#endif
				random_state.asleep++;
				rndstats.rnd_waits++;
				/* Woken by the entropy-adding side. */
				ret = tsleep(&random_state.asleep,
				    PWAIT | PCATCH, "rndrd", 0);
#ifdef	RNDEBUG
				if (rnd_debug & RD_WAIT)
					printf("rnd: awakened(%d)\n", ret);
#endif
				if (ret)
					break;
			}
			/* Never hand out more bytes than entropy bits/8. */
			if (n > random_state.entropy_count / 8)
				n = random_state.entropy_count / 8;
			rndstats.rnd_reads++;
#ifdef	RNDEBUG
			if (rnd_debug & RD_OUTPUT)
				printf("rnd: %u possible output\n", n);
#endif
			/* FALLTHROUGH: SRND output comes from the pool too */
		case RND_URND:
			get_random_bytes((char *)buf, n);
#ifdef	RNDEBUG
			if (rnd_debug & RD_OUTPUT)
				printf("rnd: %u bytes for output\n", n);
#endif
			break;
		case RND_ARND_OLD:
		case RND_ARND:
			arc4random_buf(buf, n);
			break;
		default:
			ret = ENXIO;
		}
		/* n may be 0 for SRND if the pool emptied; skip copyout. */
		if (n != 0 && ret == 0) {
			ret = uiomove((caddr_t)buf, n, uio);
			if (!ret && uio->uio_resid > 0)
				yield();
		}
	}

	free(buf, M_TEMP);
	return ret;
}
1066 
1067 int
1068 randompoll(dev_t dev, int events, struct proc *p)
1069 {
1070 	int revents;
1071 
1072 	revents = events & (POLLOUT | POLLWRNORM);	/* always writable */
1073 	if (events & (POLLIN | POLLRDNORM)) {
1074 		if (minor(dev) == RND_SRND && random_state.entropy_count <= 0)
1075 			selrecord(p, &rnd_rsel);
1076 		else
1077 			revents |= events & (POLLIN | POLLRDNORM);
1078 	}
1079 
1080 	return (revents);
1081 }
1082 
1083 int
1084 randomkqfilter(dev_t dev, struct knote *kn)
1085 {
1086 	struct klist *klist;
1087 
1088 	switch (kn->kn_filter) {
1089 	case EVFILT_READ:
1090 		klist = &rnd_rsel.si_note;
1091 		kn->kn_fop = &rndread_filtops;
1092 		break;
1093 	case EVFILT_WRITE:
1094 		klist = &rnd_wsel.si_note;
1095 		kn->kn_fop = &rndwrite_filtops;
1096 		break;
1097 	default:
1098 		return (1);
1099 	}
1100 	kn->kn_hook = (void *)&random_state;
1101 
1102 	mtx_enter(&rndlock);
1103 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1104 	mtx_leave(&rndlock);
1105 
1106 	return (0);
1107 }
1108 
/* Detach a read knote from the read-side klist, under the rnd lock. */
void
filt_rndrdetach(struct knote *kn)
{
	mtx_enter(&rndlock);
	SLIST_REMOVE(&rnd_rsel.si_note, kn, knote, kn_selnext);
	mtx_leave(&rndlock);
}
1116 
1117 int
1118 filt_rndread(struct knote *kn, long hint)
1119 {
1120 	struct random_bucket *rs = (struct random_bucket *)kn->kn_hook;
1121 
1122 	kn->kn_data = (int)rs->entropy_count;
1123 	return rs->entropy_count > 0;
1124 }
1125 
/* Detach a write knote from the write-side klist, under the rnd lock. */
void
filt_rndwdetach(struct knote *kn)
{
	mtx_enter(&rndlock);
	SLIST_REMOVE(&rnd_wsel.si_note, kn, knote, kn_selnext);
	mtx_leave(&rndlock);
}
1133 
/* Write filter: the random devices are always writable. */
int
filt_rndwrite(struct knote *kn, long hint)
{
	return (1);
}
1139 
/*
 * Write to a random device: mix user-supplied data into the entropy
 * pool in POOLBYTES-sized chunks.  Writes to the ARC4 minors also
 * force a rekey of the ARC4 generator.  RND_RND accepts no writes.
 */
int
randomwrite(dev_t dev, struct uio *uio, int flags)
{
	int		ret = 0;
	u_int32_t	*buf;

	if (minor(dev) == RND_RND)
		return ENXIO;

	if (uio->uio_resid == 0)
		return 0;

	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);

	while (!ret && uio->uio_resid > 0) {
		u_short	n = min(POOLBYTES, uio->uio_resid);

		ret = uiomove((caddr_t)buf, n, uio);
		if (!ret) {
			/* Zero-pad a partial trailing word before mixing. */
			while (n % sizeof(u_int32_t))
				((u_int8_t *) buf)[n++] = 0;
			add_entropy_words(buf, n / 4);
		}
	}

	/* New pool material: schedule an ARC4 rekey for the arc4 minors. */
	if ((minor(dev) == RND_ARND || minor(dev) == RND_ARND_OLD) &&
	    !ret)
		arc4random_initialized = 0;

	free(buf, M_TEMP);
	return ret;
}
1172 
/*
 * ioctl(2) handler for the random devices.  Read-only queries need no
 * privilege; anything that alters the entropy accounting or forces a
 * rekey requires root (suser).  Each call itself contributes timer
 * randomness on entry and exit.
 */
int
randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	int	ret = 0;
	u_int	cnt;

	add_timer_randomness((u_long)p ^ (u_long)data ^ cmd);

	switch (cmd) {
	case FIOASYNC:
		/* rnd has no async flag in softc so this is really a no-op. */
		break;

	case FIONBIO:
		/* Handled in the upper FS layer. */
		break;

	case RNDGETENTCNT:
		/* Report current pool entropy estimate (bits). */
		mtx_enter(&rndlock);
		*(u_int *)data = random_state.entropy_count;
		mtx_leave(&rndlock);
		break;
	case RNDADDTOENTCNT:
		/* Root only: credit extra entropy, clamped to pool size. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else {
			cnt = *(u_int *)data;
			mtx_enter(&rndlock);
			random_state.entropy_count += cnt;
			if (random_state.entropy_count > POOLBITS)
				random_state.entropy_count = POOLBITS;
			mtx_leave(&rndlock);
		}
		break;
	case RNDZAPENTCNT:
		/* Root only: zero the entropy estimate. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else {
			mtx_enter(&rndlock);
			random_state.entropy_count = 0;
			mtx_leave(&rndlock);
		}
		break;
	case RNDSTIRARC4:
		/* Root only: force an ARC4 rekey, if enough entropy. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else if (random_state.entropy_count < 64)
			ret = EAGAIN;
		else {
			mtx_enter(&rndlock);
			arc4random_initialized = 0;
			mtx_leave(&rndlock);
		}
		break;
	case RNDCLRSTATS:
		/* Root only: reset the statistics counters. */
		if (suser(p, 0) != 0)
			ret = EPERM;
		else {
			mtx_enter(&rndlock);
			bzero(&rndstats, sizeof(rndstats));
			mtx_leave(&rndlock);
		}
		break;
	default:
		ret = ENOTTY;
	}

	add_timer_randomness((u_long)p ^ (u_long)data ^ cmd);
	return ret;
}
1243