1 /*	$NetBSD: kern_entropy.c,v 1.61 2023/05/24 20:22:23 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Entropy subsystem
34  *
35  *	* Each CPU maintains a per-CPU entropy pool so that gathering
36  *	  entropy requires no interprocessor synchronization, except
37  *	  early at boot when we may be scrambling to gather entropy as
38  *	  soon as possible.
39  *
40  *	  - entropy_enter gathers entropy and never drops it on the
41  *	    floor, at the cost of sometimes having to do cryptography.
42  *
43  *	  - entropy_enter_intr gathers entropy or drops it on the
44  *	    floor, with low latency.  Work to stir the pool or kick the
45  *	    housekeeping thread is scheduled in soft interrupts.
46  *
47  *	* entropy_enter immediately enters into the global pool if it
48  *	  can transition to full entropy in one swell foop.  Otherwise,
49  *	  it defers to a housekeeping thread that consolidates entropy,
50  *	  but only when the CPUs collectively have full entropy, in
51  *	  order to mitigate iterative-guessing attacks.
52  *
53  *	* The entropy housekeeping thread continues to consolidate
54  *	  entropy even after we think we have full entropy, in case we
55  *	  are wrong, but is limited to one discretionary consolidation
56  *	  per minute, and only when new entropy is actually coming in,
57  *	  to limit performance impact.
58  *
59  *	* The entropy epoch is the number that changes when we
60  *	  transition from partial entropy to full entropy, so that
61  *	  users can easily determine when to reseed.  This also
62  *	  facilitates an operator explicitly causing everything to
63  *	  reseed by sysctl -w kern.entropy.consolidate=1.
64  *
65  *	* No entropy estimation based on the sample values, which is a
66  *	  contradiction in terms and a potential source of side
67  *	  channels.  It is the responsibility of the driver author to
68  *	  study how predictable the physical source of input can ever
69  *	  be, and to furnish a lower bound on the amount of entropy it
70  *	  has.
71  *
72  *	* Entropy depletion is available for testing (or if you're into
73  *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
74  *	  the logic to support it is small, to minimize chance of bugs.
75  */
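
/*
 * Illustrative data flow, summarizing the design above (a sketch,
 * not normative; see the individual functions below for details):
 *
 *	rnd_add_data/rnd_add_uint32 (driver samples)
 *	    -> entropy_enter or entropy_enter_intr
 *	    -> per-CPU entpool, with a pending-entropy count
 *	    -> entropy_account_cpu, or consolidation by the
 *	       housekeeping thread, into the global pool
 *	    -> entropy_extract, normally on behalf of cprng(9)
 */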
76 
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.61 2023/05/24 20:22:23 riastradh Exp $");
79 
80 #include <sys/param.h>
81 #include <sys/types.h>
82 #include <sys/atomic.h>
83 #include <sys/compat_stub.h>
84 #include <sys/condvar.h>
85 #include <sys/cpu.h>
86 #include <sys/entropy.h>
87 #include <sys/errno.h>
88 #include <sys/evcnt.h>
89 #include <sys/event.h>
90 #include <sys/file.h>
91 #include <sys/intr.h>
92 #include <sys/kauth.h>
93 #include <sys/kernel.h>
94 #include <sys/kmem.h>
95 #include <sys/kthread.h>
96 #include <sys/lwp.h>
97 #include <sys/module_hook.h>
98 #include <sys/mutex.h>
99 #include <sys/percpu.h>
100 #include <sys/poll.h>
101 #include <sys/proc.h>
102 #include <sys/queue.h>
103 #include <sys/reboot.h>
104 #include <sys/rnd.h>		/* legacy kernel API */
105 #include <sys/rndio.h>		/* userland ioctl interface */
106 #include <sys/rndsource.h>	/* kernel rndsource driver API */
107 #include <sys/select.h>
108 #include <sys/selinfo.h>
109 #include <sys/sha1.h>		/* for boot seed checksum */
110 #include <sys/stdint.h>
111 #include <sys/sysctl.h>
112 #include <sys/syslog.h>
113 #include <sys/systm.h>
114 #include <sys/time.h>
115 #include <sys/xcall.h>
116 
117 #include <lib/libkern/entpool.h>
118 
119 #include <machine/limits.h>
120 
121 #ifdef __HAVE_CPU_COUNTER
122 #include <machine/cpu_counter.h>
123 #endif
124 
125 /*
126  * struct entropy_cpu
127  *
128  *	Per-CPU entropy state.  The pool is allocated separately
129  *	because percpu(9) sometimes moves per-CPU objects around
130  *	without zeroing them, which would lead to unwanted copies of
131  *	sensitive secrets.  The evcnt is allocated separately because
132  *	evcnt(9) assumes it stays put in memory.
133  */
134 struct entropy_cpu {
135 	struct entropy_cpu_evcnt {
136 		struct evcnt		softint;
137 		struct evcnt		intrdrop;
138 		struct evcnt		intrtrunc;
139 	}			*ec_evcnt;
140 	struct entpool		*ec_pool;
141 	unsigned		ec_pending;
142 	bool			ec_locked;
143 };
144 
145 /*
146  * struct entropy_cpu_lock
147  *
148  *	State for locking the per-CPU entropy state.
149  */
150 struct entropy_cpu_lock {
151 	int		ecl_s;
152 	uint64_t	ecl_ncsw;
153 };
154 
155 /*
156  * struct rndsource_cpu
157  *
158  *	Per-CPU rndsource state.
159  */
160 struct rndsource_cpu {
161 	unsigned		rc_entropybits;
162 	unsigned		rc_timesamples;
163 	unsigned		rc_datasamples;
164 };
165 
166 /*
167  * entropy_global (a.k.a. E for short in this file)
168  *
169  *	Global entropy state.  Writes protected by the global lock.
170  *	Some fields, marked (A), can be read outside the lock, and are
171  *	maintained with atomic_load/store_relaxed.
172  */
173 struct {
174 	kmutex_t	lock;		/* covers all global state */
175 	struct entpool	pool;		/* global pool for extraction */
176 	unsigned	needed;		/* (A) needed globally */
177 	unsigned	pending;	/* (A) pending in per-CPU pools */
178 	unsigned	timestamp;	/* (A) time of last consolidation */
179 	unsigned	epoch;		/* (A) changes when needed -> 0 */
180 	kcondvar_t	cv;		/* notifies state changes */
181 	struct selinfo	selq;		/* notifies needed -> 0 */
182 	struct lwp	*sourcelock;	/* lock on list of sources */
183 	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
184 	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
185 	enum entropy_stage {
186 		ENTROPY_COLD = 0, /* single-threaded */
187 		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
188 		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
189 	}		stage;
190 	bool		consolidate;	/* kick thread to consolidate */
191 	bool		seed_rndsource;	/* true if seed source is attached */
192 	bool		seeded;		/* true if seed file already loaded */
193 } entropy_global __cacheline_aligned = {
194 	/* Fields that must be initialized when the kernel is loaded.  */
195 	.needed = ENTROPY_CAPACITY*NBBY,
196 	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
197 	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
198 	.stage = ENTROPY_COLD,
199 };
200 
201 #define	E	(&entropy_global)	/* declutter */
202 
203 /* Read-mostly globals */
204 static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
205 static void		*entropy_sih __read_mostly; /* softint handler */
206 static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */
207 
208 static struct krndsource seed_rndsource __read_mostly;
209 
210 /*
211  * Event counters
212  *
213  *	Must be careful with adding these because they can serve as
214  *	side channels.
215  */
216 static struct evcnt entropy_discretionary_evcnt =
217     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
218 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
219 static struct evcnt entropy_immediate_evcnt =
220     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
221 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
222 static struct evcnt entropy_partial_evcnt =
223     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
224 EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
225 static struct evcnt entropy_consolidate_evcnt =
226     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
227 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
228 static struct evcnt entropy_extract_fail_evcnt =
229     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
230 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
231 static struct evcnt entropy_request_evcnt =
232     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
233 EVCNT_ATTACH_STATIC(entropy_request_evcnt);
234 static struct evcnt entropy_deplete_evcnt =
235     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
236 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
237 static struct evcnt entropy_notify_evcnt =
238     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
239 EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
240 
241 /* Sysctl knobs */
242 static bool	entropy_collection = 1;
243 static bool	entropy_depletion = 0; /* Silly!  */
244 
245 static const struct sysctlnode	*entropy_sysctlroot;
246 static struct sysctllog		*entropy_sysctllog;
247 
248 /* Forward declarations */
249 static void	entropy_init_cpu(void *, void *, struct cpu_info *);
250 static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
251 static void	entropy_account_cpu(struct entropy_cpu *);
252 static void	entropy_enter(const void *, size_t, unsigned);
253 static bool	entropy_enter_intr(const void *, size_t, unsigned);
254 static void	entropy_softintr(void *);
255 static void	entropy_thread(void *);
256 static uint32_t	entropy_pending(void);
257 static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
258 static void	entropy_do_consolidate(void);
259 static void	entropy_consolidate_xc(void *, void *);
260 static void	entropy_notify(void);
261 static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
262 static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
263 static void	filt_entropy_read_detach(struct knote *);
264 static int	filt_entropy_read_event(struct knote *, long);
265 static int	entropy_request(size_t, int);
266 static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
267 		    uint32_t, uint32_t);
268 static unsigned	rndsource_entropybits(struct krndsource *);
269 static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
270 static void	rndsource_to_user(struct krndsource *, rndsource_t *);
271 static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
272 static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);
273 
274 /*
275  * entropy_timer()
276  *
277  *	Cycle counter, time counter, or anything that changes a wee bit
278  *	unpredictably.
279  */
280 static inline uint32_t
281 entropy_timer(void)
282 {
283 	struct bintime bt;
284 	uint32_t v;
285 
286 	/* If we have a CPU cycle counter, use the low 32 bits.  */
287 #ifdef __HAVE_CPU_COUNTER
288 	if (__predict_true(cpu_hascounter()))
289 		return cpu_counter32();
290 #endif	/* __HAVE_CPU_COUNTER */
291 
292 	/* If we're cold, tough.  Can't binuptime while cold.  */
293 	if (__predict_false(cold))
294 		return 0;
295 
296 	/* Fold the 128 bits of binuptime into 32 bits.  */
297 	binuptime(&bt);
298 	v = bt.frac;
299 	v ^= bt.frac >> 32;
300 	v ^= bt.sec;
301 	v ^= bt.sec >> 32;
302 	return v;
303 }
304 
305 static void
306 attach_seed_rndsource(void)
307 {
308 
309 	/*
310 	 * First called no later than entropy_init, while we are still
311 	 * single-threaded, so no need for RUN_ONCE.
312 	 */
313 	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
314 		return;
315 	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
316 	    RND_FLAG_COLLECT_VALUE);
317 	E->seed_rndsource = true;
318 }
319 
320 /*
321  * entropy_init()
322  *
323  *	Initialize the entropy subsystem.  Panic on failure.
324  *
325  *	Requires percpu(9) and sysctl(9) to be initialized.
326  */
327 static void
328 entropy_init(void)
329 {
330 	uint32_t extra[2];
331 	struct krndsource *rs;
332 	unsigned i = 0;
333 
334 	KASSERT(E->stage == ENTROPY_COLD);
335 
336 	/* Grab some cycle counts early at boot.  */
337 	extra[i++] = entropy_timer();
338 
339 	/* Run the entropy pool cryptography self-test.  */
340 	if (entpool_selftest() == -1)
341 		panic("entropy pool crypto self-test failed");
342 
343 	/* Create the sysctl directory.  */
344 	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
345 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
346 	    SYSCTL_DESCR("Entropy (random number sources) options"),
347 	    NULL, 0, NULL, 0,
348 	    CTL_KERN, CTL_CREATE, CTL_EOL);
349 
350 	/* Create the sysctl knobs.  */
351 	/* XXX These shouldn't be writable at securelevel>0.  */
352 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
353 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
354 	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
355 	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
356 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
357 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
358 	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
359 	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
360 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
361 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
362 	    SYSCTL_DESCR("Trigger entropy consolidation now"),
363 	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
364 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
365 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
366 	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
367 	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
368 	/* XXX These should maybe not be readable at securelevel>0.  */
369 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
370 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
371 	    "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
372 	    NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
373 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
374 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
375 	    "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
376 	    NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
377 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
378 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
379 	    "epoch", SYSCTL_DESCR("Entropy epoch"),
380 	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
381 
382 	/* Initialize the global state for multithreaded operation.  */
383 	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
384 	cv_init(&E->cv, "entropy");
385 	selinit(&E->selq);
386 	cv_init(&E->sourcelock_cv, "entsrclock");
387 
388 	/* Make sure the seed source is attached.  */
389 	attach_seed_rndsource();
390 
391 	/* Note if the bootloader didn't provide a seed.  */
392 	if (!E->seeded)
393 		aprint_debug("entropy: no seed from bootloader\n");
394 
395 	/* Allocate the per-CPU records for all early entropy sources.  */
396 	LIST_FOREACH(rs, &E->sources, list)
397 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
398 
399 	/* Allocate and initialize the per-CPU state.  */
400 	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
401 	    entropy_init_cpu, entropy_fini_cpu, NULL);
402 
403 	/* Enter the boot cycle count to get started.  */
404 	extra[i++] = entropy_timer();
405 	KASSERT(i == __arraycount(extra));
406 	entropy_enter(extra, sizeof extra, 0);
407 	explicit_memset(extra, 0, sizeof extra);
408 
409 	/* We are now ready for multi-threaded operation.  */
410 	E->stage = ENTROPY_WARM;
411 }
412 
413 static void
414 entropy_init_late_cpu(void *a, void *b)
415 {
416 	int bound;
417 
418 	/*
419 	 * We're not necessarily in a softint lwp here (xc_broadcast
420 	 * triggers softint on other CPUs, but calls directly on this
421 	 * CPU), so explicitly bind to the current CPU to invoke the
422 	 * softintr -- this lets us have a simpler assertion in
423 	 * entropy_account_cpu.  Not necessary to avoid migration
424 	 * because xc_broadcast disables kpreemption anyway, but it
425 	 * doesn't hurt.
426 	 */
427 	bound = curlwp_bind();
428 	entropy_softintr(NULL);
429 	curlwp_bindx(bound);
430 }
431 
432 /*
433  * entropy_init_late()
434  *
435  *	Late initialization.  Panic on failure.
436  *
437  *	Requires CPUs to have been detected and LWPs to have started.
438  */
439 static void
440 entropy_init_late(void)
441 {
442 	void *sih;
443 	int error;
444 
445 	KASSERT(E->stage == ENTROPY_WARM);
446 
447 	/*
448 	 * Establish the softint at the highest softint priority level.
449 	 * Must happen after CPU detection.
450 	 */
451 	sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
452 	    &entropy_softintr, NULL);
453 	if (sih == NULL)
454 		panic("unable to establish entropy softint");
455 
456 	/*
457 	 * Create the entropy housekeeping thread.  Must happen after
458 	 * lwpinit.
459 	 */
460 	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
461 	    entropy_thread, NULL, &entropy_lwp, "entbutler");
462 	if (error)
463 		panic("unable to create entropy housekeeping thread: %d",
464 		    error);
465 
466 	/*
467 	 * Wait until the per-CPU initialization has hit all CPUs
468 	 * before proceeding to mark the entropy system hot and
469 	 * enabling use of the softint.
470 	 */
471 	xc_barrier(XC_HIGHPRI);
472 	E->stage = ENTROPY_HOT;
473 	atomic_store_relaxed(&entropy_sih, sih);
474 
475 	/*
476 	 * At this point, entering new samples from interrupt handlers
477 	 * will trigger the softint to process them.  But there may be
478 	 * some samples that were entered from interrupt handlers
479 	 * before the softint was available.  Make sure we process
480 	 * those samples on all CPUs by running the softint logic on
481 	 * all CPUs.
482 	 */
483 	xc_wait(xc_broadcast(XC_HIGHPRI, entropy_init_late_cpu, NULL, NULL));
484 }
485 
486 /*
487  * entropy_init_cpu(ptr, cookie, ci)
488  *
489  *	percpu(9) constructor for per-CPU entropy pool.
490  */
491 static void
492 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
493 {
494 	struct entropy_cpu *ec = ptr;
495 	const char *cpuname;
496 
497 	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
498 	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
499 	ec->ec_pending = 0;
500 	ec->ec_locked = false;
501 
502 	/* XXX ci_cpuname may not be initialized early enough.  */
503 	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
504 	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
505 	    cpuname, "entropy softint");
506 	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
507 	    cpuname, "entropy intrdrop");
508 	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
509 	    cpuname, "entropy intrtrunc");
510 }
511 
512 /*
513  * entropy_fini_cpu(ptr, cookie, ci)
514  *
515  *	percpu(9) destructor for per-CPU entropy pool.
516  */
517 static void
518 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
519 {
520 	struct entropy_cpu *ec = ptr;
521 
522 	/*
523 	 * Zero any lingering data.  Disclosure of the per-CPU pool
524 	 * shouldn't retroactively affect the security of any keys
525 	 * generated, because entpool(9) erases whatever we have just
526 	 * drawn out of any pool, but better safe than sorry.
527 	 */
528 	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
529 
530 	evcnt_detach(&ec->ec_evcnt->intrtrunc);
531 	evcnt_detach(&ec->ec_evcnt->intrdrop);
532 	evcnt_detach(&ec->ec_evcnt->softint);
533 
534 	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
535 	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
536 }
537 
538 /*
539  * ec = entropy_cpu_get(&lock)
540  * entropy_cpu_put(&lock, ec)
541  *
542  *	Lock and unlock the per-CPU entropy state.  This only prevents
543  *	access on the same CPU -- by hard interrupts, by soft
544  *	interrupts, or by other threads.
545  *
546  *	Blocks soft interrupts and preemption altogether; doesn't block
547  *	hard interrupts, but causes samples in hard interrupts to be
548  *	dropped.
549  */
550 static struct entropy_cpu *
551 entropy_cpu_get(struct entropy_cpu_lock *lock)
552 {
553 	struct entropy_cpu *ec;
554 
555 	ec = percpu_getref(entropy_percpu);
556 	lock->ecl_s = splsoftserial();
557 	KASSERT(!ec->ec_locked);
558 	ec->ec_locked = true;
559 	lock->ecl_ncsw = curlwp->l_ncsw;
560 	__insn_barrier();
561 
562 	return ec;
563 }
564 
565 static void
566 entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
567 {
568 
569 	KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
570 	KASSERT(ec->ec_locked);
571 
572 	__insn_barrier();
573 	KASSERT(lock->ecl_ncsw == curlwp->l_ncsw);
574 	ec->ec_locked = false;
575 	splx(lock->ecl_s);
576 	percpu_putref(entropy_percpu);
577 }
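
/*
 * Example (illustrative only): the access pattern these two helpers
 * are meant for, as used by entropy_enter, entropy_softintr, and
 * entropy_consolidate_xc below:
 *
 *	struct entropy_cpu_lock lock;
 *	struct entropy_cpu *ec;
 *
 *	ec = entropy_cpu_get(&lock);
 *	...touch ec->ec_pool and ec->ec_pending...
 *	entropy_cpu_put(&lock, ec);
 */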
578 
579 /*
580  * entropy_seed(seed)
581  *
582  *	Seed the entropy pool with seed.  Meant to be called as early
583  *	as possible by the bootloader; may be called before or after
584  *	entropy_init.  Must be called before system reaches userland.
585  *	Must be called in thread or soft interrupt context, not in hard
586  *	interrupt context.  Must be called at most once.
587  *
588  *	Overwrites the seed in place.  Caller may then free the memory.
589  */
590 static void
591 entropy_seed(rndsave_t *seed)
592 {
593 	SHA1_CTX ctx;
594 	uint8_t digest[SHA1_DIGEST_LENGTH];
595 	bool seeded;
596 
597 	/*
598 	 * Verify the checksum.  If the checksum fails, take the data
599 	 * but ignore the entropy estimate -- the file may have been
600 	 * incompletely written with garbage, which is harmless to add
601 	 * but may not be as unpredictable as alleged.
602 	 */
603 	SHA1Init(&ctx);
604 	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
605 	SHA1Update(&ctx, seed->data, sizeof(seed->data));
606 	SHA1Final(digest, &ctx);
607 	CTASSERT(sizeof(seed->digest) == sizeof(digest));
608 	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
609 		printf("entropy: invalid seed checksum\n");
610 		seed->entropy = 0;
611 	}
612 	explicit_memset(&ctx, 0, sizeof ctx);
613 	explicit_memset(digest, 0, sizeof digest);
614 
615 	/*
616 	 * If the entropy is insensibly large, try byte-swapping.
617 	 * Otherwise assume the file is corrupted and act as though it
618 	 * has zero entropy.
619 	 */
620 	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
621 		seed->entropy = bswap32(seed->entropy);
622 		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
623 			seed->entropy = 0;
624 	}
625 
626 	/* Make sure the seed source is attached.  */
627 	attach_seed_rndsource();
628 
629 	/* Test and set E->seeded.  */
630 	if (E->stage >= ENTROPY_WARM)
631 		mutex_enter(&E->lock);
632 	seeded = E->seeded;
633 	E->seeded = (seed->entropy > 0);
634 	if (E->stage >= ENTROPY_WARM)
635 		mutex_exit(&E->lock);
636 
637 	/*
638 	 * If we've been seeded, we may be re-entering the same seed
639 	 * (e.g., bootloader vs module init, or something).  No harm in
640 	 * entering it twice, but it contributes no additional entropy.
641 	 */
642 	if (seeded) {
643 		printf("entropy: double-seeded by bootloader\n");
644 		seed->entropy = 0;
645 	} else {
646 		printf("entropy: entering seed from bootloader"
647 		    " with %u bits of entropy\n", (unsigned)seed->entropy);
648 	}
649 
650 	/* Enter it into the pool and promptly zero it.  */
651 	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
652 	    seed->entropy);
653 	explicit_memset(seed, 0, sizeof(*seed));
654 }
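
/*
 * For orientation, a sketch of the seed record as used above -- the
 * field names match this function's use of it, but see rndsave_t in
 * <sys/rnd.h> for the authoritative layout and sizes:
 *
 *	typedef struct {
 *		uint32_t	entropy;	(claimed entropy, in bits)
 *		uint8_t		data[...];	(the seed material)
 *		uint8_t		digest[SHA1_DIGEST_LENGTH];
 *						(SHA-1 over entropy, data)
 *	} rndsave_t;
 */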
655 
656 /*
657  * entropy_bootrequest()
658  *
659  *	Request entropy from all sources at boot, once config is
660  *	complete and interrupts are running.
661  */
662 void
663 entropy_bootrequest(void)
664 {
665 	int error;
666 
667 	KASSERT(E->stage >= ENTROPY_WARM);
668 
669 	/*
670 	 * Request enough to satisfy the maximum entropy shortage.
671 	 * This is harmless overkill if the bootloader provided a seed.
672 	 */
673 	mutex_enter(&E->lock);
674 	error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT);
675 	KASSERT(error == 0);
676 	mutex_exit(&E->lock);
677 }
678 
679 /*
680  * entropy_epoch()
681  *
682  *	Returns the current entropy epoch.  If this changes, you should
683  *	reseed.  If -1, means system entropy has not yet reached full
684  *	reseed.  A value of -1 means the system has not yet reached
685  *	full entropy or been explicitly consolidated; the epoch never
686  *	reverts to -1.  It is never zero, so you can always use zero
687  *	as an uninitialized sentinel value meaning `reseed ASAP'.
688  *	Usage model:
689  *
690  *		struct foo {
691  *			struct crypto_prng prng;
692  *			unsigned epoch;
693  *		} *foo;
694  *
695  *		unsigned epoch = entropy_epoch();
696  *		if (__predict_false(epoch != foo->epoch)) {
697  *			uint8_t seed[32];
698  *			if (entropy_extract(seed, sizeof seed, 0) != 0)
699  *				warn("no entropy");
700  *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
701  *			foo->epoch = epoch;
702  *		}
703  */
704 unsigned
705 entropy_epoch(void)
706 {
707 
708 	/*
709 	 * Unsigned int, so no need for seqlock for an atomic read, but
710 	 * make sure we read it afresh each time.
711 	 */
712 	return atomic_load_relaxed(&E->epoch);
713 }
714 
715 /*
716  * entropy_ready()
717  *
718  *	True if the entropy pool has full entropy.
719  */
720 bool
721 entropy_ready(void)
722 {
723 
724 	return atomic_load_relaxed(&E->needed) == 0;
725 }
726 
727 /*
728  * entropy_account_cpu(ec)
729  *
730  *	Consider whether to consolidate entropy into the global pool
731  *	after we just added some into the current CPU's pending pool.
732  *
733  *	- If this CPU can provide enough entropy now, do so.
734  *
735  *	- If this and whatever else is available on other CPUs can
736  *	  provide enough entropy, kick the consolidation thread.
737  *
738  *	- Otherwise, do as little as possible, except maybe consolidate
739  *	  entropy at most once a minute.
740  *
741  *	Caller must be bound to a CPU and therefore have exclusive
742  *	access to ec.  Will acquire and release the global lock.
743  */
744 static void
745 entropy_account_cpu(struct entropy_cpu *ec)
746 {
747 	struct entropy_cpu_lock lock;
748 	struct entropy_cpu *ec0;
749 	unsigned diff;
750 
751 	KASSERT(E->stage >= ENTROPY_WARM);
752 	KASSERT(curlwp->l_pflag & LP_BOUND);
753 
754 	/*
755 	 * If there's no entropy needed, and entropy has been
756 	 * consolidated in the last minute, do nothing.
757 	 */
758 	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
759 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
760 	    __predict_true((time_uptime - E->timestamp) <= 60))
761 		return;
762 
763 	/*
764 	 * Consider consolidation, under the global lock and with the
765 	 * per-CPU state locked.
766 	 */
767 	mutex_enter(&E->lock);
768 	ec0 = entropy_cpu_get(&lock);
769 	KASSERT(ec0 == ec);
770 	if (ec->ec_pending == 0) {
771 		/* Raced with consolidation xcall.  Nothing to do.  */
772 	} else if (E->needed != 0 && E->needed <= ec->ec_pending) {
773 		/*
774 		 * If we have not yet attained full entropy but we can
775 		 * now, do so.  This way we disseminate entropy
776 		 * promptly when it becomes available early at boot;
777 		 * otherwise we leave it to the entropy consolidation
778 		 * thread, which is rate-limited to mitigate side
779 		 * channels and abuse.
780 		 */
781 		uint8_t buf[ENTPOOL_CAPACITY];
782 
783 		/* Transfer from the local pool to the global pool.  */
784 		entpool_extract(ec->ec_pool, buf, sizeof buf);
785 		entpool_enter(&E->pool, buf, sizeof buf);
786 		atomic_store_relaxed(&ec->ec_pending, 0);
787 		atomic_store_relaxed(&E->needed, 0);
788 
789 		/* Notify waiters that we now have full entropy.  */
790 		entropy_notify();
791 		entropy_immediate_evcnt.ev_count++;
792 	} else {
793 		/* Determine how much we can add to the global pool.  */
794 		KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY,
795 		    "E->pending=%u", E->pending);
796 		diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending);
797 
798 		/*
799 		 * This should make a difference unless we are already
800 		 * saturated.
801 		 */
802 		KASSERTMSG(diff || E->pending == ENTROPY_CAPACITY*NBBY,
803 		    "diff=%u E->pending=%u ec->ec_pending=%u cap=%u",
804 		    diff, E->pending, ec->ec_pending,
805 		    (unsigned)ENTROPY_CAPACITY*NBBY);
806 
807 		/* Add to the global, subtract from the local.  */
808 		E->pending += diff;
809 		KASSERT(E->pending);
810 		KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY,
811 		    "E->pending=%u", E->pending);
812 		atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff);
813 
814 		if (E->needed <= E->pending) {
815 			/*
816 			 * Enough entropy between all the per-CPU
817 			 * pools.  Wake up the housekeeping thread.
818 			 *
819 			 * If we don't need any entropy, this doesn't
820 			 * mean much, but it is the only time we ever
821 			 * gather additional entropy in case the
822 			 * accounting has been overly optimistic.  This
823 			 * happens at most once a minute, so there's
824 			 * negligible performance cost.
825 			 */
826 			E->consolidate = true;
827 			cv_broadcast(&E->cv);
828 			if (E->needed == 0)
829 				entropy_discretionary_evcnt.ev_count++;
830 		} else {
831 			/* Can't get full entropy.  Keep gathering.  */
832 			entropy_partial_evcnt.ev_count++;
833 		}
834 	}
835 	entropy_cpu_put(&lock, ec);
836 	mutex_exit(&E->lock);
837 }
838 
839 /*
840  * entropy_enter_early(buf, len, nbits)
841  *
842  *	Do entropy bookkeeping globally, before we have established
843  *	per-CPU pools.  Enter directly into the global pool in the hope
844  *	that we enter enough before the first entropy_extract to thwart
845  *	iterative-guessing attacks; entropy_extract will warn if not.
846  */
847 static void
848 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
849 {
850 	bool notify = false;
851 
852 	KASSERT(E->stage == ENTROPY_COLD);
853 
854 	/* Enter it into the pool.  */
855 	entpool_enter(&E->pool, buf, len);
856 
857 	/*
858 	 * Decide whether to notify reseed -- we will do so if either:
859 	 * (a) we transition from partial entropy to full entropy, or
860 	 * (b) we get a batch of full entropy all at once.
861 	 */
862 	notify |= (E->needed && E->needed <= nbits);
863 	notify |= (nbits >= ENTROPY_CAPACITY*NBBY);
864 
865 	/* Subtract from the needed count and notify if appropriate.  */
866 	E->needed -= MIN(E->needed, nbits);
867 	if (notify) {
868 		entropy_notify();
869 		entropy_immediate_evcnt.ev_count++;
870 	}
871 }
872 
873 /*
874  * entropy_enter(buf, len, nbits)
875  *
876  *	Enter len bytes of data from buf into the system's entropy
877  *	pool, stirring as necessary when the internal buffer fills up.
878  *	nbits is a lower bound on the number of bits of entropy in the
879  *	process that led to this sample.
880  */
881 static void
882 entropy_enter(const void *buf, size_t len, unsigned nbits)
883 {
884 	struct entropy_cpu_lock lock;
885 	struct entropy_cpu *ec;
886 	unsigned pending;
887 	int bound;
888 
889 	KASSERTMSG(!cpu_intr_p(),
890 	    "use entropy_enter_intr from interrupt context");
891 	KASSERTMSG(howmany(nbits, NBBY) <= len,
892 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
893 
894 	/* If it's too early after boot, just use entropy_enter_early.  */
895 	if (__predict_false(E->stage == ENTROPY_COLD)) {
896 		entropy_enter_early(buf, len, nbits);
897 		return;
898 	}
899 
900 	/*
901 	 * Bind ourselves to the current CPU so we don't switch CPUs
902 	 * between entering data into the current CPU's pool (and
903 	 * updating the pending count) and transferring it to the
904 	 * global pool in entropy_account_cpu.
905 	 */
906 	bound = curlwp_bind();
907 
908 	/*
909 	 * With the per-CPU state locked, enter into the per-CPU pool
910 	 * and count up what we can add.
911 	 */
912 	ec = entropy_cpu_get(&lock);
913 	entpool_enter(ec->ec_pool, buf, len);
914 	pending = ec->ec_pending;
915 	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
916 	atomic_store_relaxed(&ec->ec_pending, pending);
917 	entropy_cpu_put(&lock, ec);
918 
919 	/* Consolidate globally if appropriate based on what we added.  */
920 	if (pending)
921 		entropy_account_cpu(ec);
922 
923 	curlwp_bindx(bound);
924 }
925 
926 /*
927  * entropy_enter_intr(buf, len, nbits)
928  *
929  *	Enter up to len bytes of data from buf into the system's
930  *	entropy pool without stirring.  nbits is a lower bound on the
931  *	number of bits of entropy in the process that led to this
932  *	sample.  If the sample could be entered completely, assume
933  *	nbits of entropy pending; otherwise assume none, since we don't
934  *	know whether some parts of the sample are constant, for
935  *	instance.  Schedule a softint to stir the entropy pool if
936  *	needed.  Return true if used fully, false if truncated at all.
937  *
938  *	Must be called from hard interrupt context: this is asserted
939  *	below.  In thread context, use entropy_enter instead.
940  */
941 static bool
942 entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
943 {
944 	struct entropy_cpu *ec;
945 	bool fullyused = false;
946 	uint32_t pending;
947 	void *sih;
948 
949 	KASSERT(cpu_intr_p());
950 	KASSERTMSG(howmany(nbits, NBBY) <= len,
951 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
952 
953 	/* If it's too early after boot, just use entropy_enter_early.  */
954 	if (__predict_false(E->stage == ENTROPY_COLD)) {
955 		entropy_enter_early(buf, len, nbits);
956 		return true;
957 	}
958 
959 	/*
960 	 * Acquire the per-CPU state.  If someone is in the middle of
961 	 * using it, drop the sample.  Otherwise, take the lock so that
962 	 * higher-priority interrupts will drop their samples.
963 	 */
964 	ec = percpu_getref(entropy_percpu);
965 	if (ec->ec_locked) {
966 		ec->ec_evcnt->intrdrop.ev_count++;
967 		goto out0;
968 	}
969 	ec->ec_locked = true;
970 	__insn_barrier();
971 
972 	/*
973 	 * Enter as much as we can into the per-CPU pool.  If it was
974 	 * truncated, schedule a softint to stir the pool and stop.
975 	 */
976 	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
977 		sih = atomic_load_relaxed(&entropy_sih);
978 		if (__predict_true(sih != NULL))
979 			softint_schedule(sih);
980 		ec->ec_evcnt->intrtrunc.ev_count++;
981 		goto out1;
982 	}
983 	fullyused = true;
984 
985 	/* Count up what we can contribute.  */
986 	pending = ec->ec_pending;
987 	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
988 	atomic_store_relaxed(&ec->ec_pending, pending);
989 
990 	/* Schedule a softint if we added anything and it matters.  */
991 	if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
992 		atomic_load_relaxed(&entropy_depletion)) &&
993 	    nbits != 0) {
994 		sih = atomic_load_relaxed(&entropy_sih);
995 		if (__predict_true(sih != NULL))
996 			softint_schedule(sih);
997 	}
998 
999 out1:	/* Release the per-CPU state.  */
1000 	KASSERT(ec->ec_locked);
1001 	__insn_barrier();
1002 	ec->ec_locked = false;
1003 out0:	percpu_putref(entropy_percpu);
1004 
1005 	return fullyused;
1006 }
1007 
1008 /*
1009  * entropy_softintr(cookie)
1010  *
1011  *	Soft interrupt handler for entering entropy.  Takes care of
1012  *	stirring the local CPU's entropy pool if it filled up during
1013  *	hard interrupts, and promptly crediting entropy from the local
1014  *	CPU's entropy pool to the global entropy pool if needed.
1015  */
1016 static void
1017 entropy_softintr(void *cookie)
1018 {
1019 	struct entropy_cpu_lock lock;
1020 	struct entropy_cpu *ec;
1021 	unsigned pending;
1022 
1023 	/*
1024 	 * With the per-CPU state locked, stir the pool if necessary
1025 	 * and determine if there's any pending entropy on this CPU to
1026 	 * account globally.
1027 	 */
1028 	ec = entropy_cpu_get(&lock);
1029 	ec->ec_evcnt->softint.ev_count++;
1030 	entpool_stir(ec->ec_pool);
1031 	pending = ec->ec_pending;
1032 	entropy_cpu_put(&lock, ec);
1033 
1034 	/* Consolidate globally if appropriate based on what we added.  */
1035 	if (pending)
1036 		entropy_account_cpu(ec);
1037 }
1038 
1039 /*
1040  * entropy_thread(cookie)
1041  *
1042  *	Handle any asynchronous entropy housekeeping.
1043  */
1044 static void
1045 entropy_thread(void *cookie)
1046 {
1047 	bool consolidate;
1048 
1049 	for (;;) {
1050 		/*
1051 		 * Wait until there's full entropy somewhere among the
1052 		 * CPUs, as confirmed at most once per minute, or
1053 		 * someone wants to consolidate.
1054 		 */
1055 		if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
1056 			consolidate = true;
1057 		} else {
1058 			mutex_enter(&E->lock);
1059 			if (!E->consolidate)
1060 				cv_timedwait(&E->cv, &E->lock, 60*hz);
1061 			consolidate = E->consolidate;
1062 			E->consolidate = false;
1063 			mutex_exit(&E->lock);
1064 		}
1065 
1066 		if (consolidate) {
1067 			/* Do it.  */
1068 			entropy_do_consolidate();
1069 
1070 			/* Mitigate abuse.  */
1071 			kpause("entropy", false, hz, NULL);
1072 		}
1073 	}
1074 }
1075 
1076 /*
1077  * entropy_pending()
1078  *
1079  *	Count up the amount of entropy pending in all per-CPU pools.
1080  */
1081 static uint32_t
1082 entropy_pending(void)
1083 {
1084 	uint32_t pending = 0;
1085 
1086 	percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
1087 	return pending;
1088 }
1089 
1090 static void
1091 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
1092 {
1093 	struct entropy_cpu *ec = ptr;
1094 	uint32_t *pendingp = cookie;
1095 	uint32_t cpu_pending;
1096 
1097 	cpu_pending = atomic_load_relaxed(&ec->ec_pending);
1098 	*pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
1099 }
1100 
1101 /*
1102  * entropy_do_consolidate()
1103  *
1104  *	Issue a cross-call to gather entropy on all CPUs and advance
1105  *	the entropy epoch.
1106  */
1107 static void
1108 entropy_do_consolidate(void)
1109 {
1110 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1111 	static struct timeval lasttime; /* serialized by E->lock */
1112 	struct entpool pool;
1113 	uint8_t buf[ENTPOOL_CAPACITY];
1114 	unsigned diff;
1115 	uint64_t ticket;
1116 
1117 	/* Gather entropy on all CPUs into a temporary pool.  */
1118 	memset(&pool, 0, sizeof pool);
1119 	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
1120 	xc_wait(ticket);
1121 
1122 	/* Acquire the lock to notify waiters.  */
1123 	mutex_enter(&E->lock);
1124 
1125 	/* Count another consolidation.  */
1126 	entropy_consolidate_evcnt.ev_count++;
1127 
1128 	/* Note when we last consolidated, i.e. now.  */
1129 	E->timestamp = time_uptime;
1130 
1131 	/* Mix what we gathered into the global pool.  */
1132 	entpool_extract(&pool, buf, sizeof buf);
1133 	entpool_enter(&E->pool, buf, sizeof buf);
1134 	explicit_memset(&pool, 0, sizeof pool);
1135 
1136 	/* Count the entropy that was gathered.  */
1137 	diff = MIN(E->needed, E->pending);
1138 	atomic_store_relaxed(&E->needed, E->needed - diff);
1139 	E->pending -= diff;
1140 	if (__predict_false(E->needed > 0)) {
1141 		if ((boothowto & AB_DEBUG) != 0 &&
1142 		    ratecheck(&lasttime, &interval)) {
1143 			printf("WARNING:"
1144 			    " consolidating less than full entropy\n");
1145 		}
1146 	}
1147 
1148 	/* Advance the epoch and notify waiters.  */
1149 	entropy_notify();
1150 
1151 	/* Release the lock.  */
1152 	mutex_exit(&E->lock);
1153 }
1154 
1155 /*
1156  * entropy_consolidate_xc(vpool, arg2)
1157  *
1158  *	Extract output from the local CPU's input pool and enter it
1159  *	into a temporary pool passed as vpool.
1160  */
1161 static void
1162 entropy_consolidate_xc(void *vpool, void *arg2 __unused)
1163 {
1164 	struct entpool *pool = vpool;
1165 	struct entropy_cpu_lock lock;
1166 	struct entropy_cpu *ec;
1167 	uint8_t buf[ENTPOOL_CAPACITY];
1168 	uint32_t extra[7];
1169 	unsigned i = 0;
1170 
1171 	/* Grab CPU number and cycle counter to mix extra into the pool.  */
1172 	extra[i++] = cpu_number();
1173 	extra[i++] = entropy_timer();
1174 
1175 	/*
1176 	 * With the per-CPU state locked, extract from the per-CPU pool
1177 	 * and count it as no longer pending.
1178 	 */
1179 	ec = entropy_cpu_get(&lock);
1180 	extra[i++] = entropy_timer();
1181 	entpool_extract(ec->ec_pool, buf, sizeof buf);
1182 	atomic_store_relaxed(&ec->ec_pending, 0);
1183 	extra[i++] = entropy_timer();
1184 	entropy_cpu_put(&lock, ec);
1185 	extra[i++] = entropy_timer();
1186 
1187 	/*
1188 	 * Enter the per-CPU extract and the extra timing into the
1189 	 * temporary pool, under the global lock.
1190 	 */
1191 	mutex_enter(&E->lock);
1192 	extra[i++] = entropy_timer();
1193 	entpool_enter(pool, buf, sizeof buf);
1194 	explicit_memset(buf, 0, sizeof buf);
1195 	extra[i++] = entropy_timer();
1196 	KASSERT(i == __arraycount(extra));
1197 	entpool_enter(pool, extra, sizeof extra);
1198 	explicit_memset(extra, 0, sizeof extra);
1199 	mutex_exit(&E->lock);
1200 }
1201 
1202 /*
1203  * entropy_notify()
1204  *
1205  *	Caller just contributed entropy to the global pool.  Advance
1206  *	the entropy epoch and notify waiters.
1207  *
1208  *	Caller must hold the global entropy lock.  Except for the
1209  *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
1210  *	have just transitioned from partial entropy to full
1211  *	entropy -- E->needed should be zero now.
1212  */
1213 static void
1214 entropy_notify(void)
1215 {
1216 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1217 	static struct timeval lasttime; /* serialized by E->lock */
1218 	unsigned epoch;
1219 
1220 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1221 
1222 	/*
1223 	 * If this is the first time, print a message to the console
1224 	 * that we're ready so operators can compare it to the timing
1225 	 * of other events.
1226 	 */
1227 	if (__predict_false(E->epoch == (unsigned)-1) && E->needed == 0)
1228 		printf("entropy: ready\n");
1229 
1230 	/* Set the epoch; roll over from UINT_MAX-1 to 1 (0, -1 reserved).  */
1231 	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
1232 	    ratecheck(&lasttime, &interval)) {
1233 		epoch = E->epoch + 1;
1234 		if (epoch == 0 || epoch == (unsigned)-1)
1235 			epoch = 1;
1236 		atomic_store_relaxed(&E->epoch, epoch);
1237 	}
1238 	KASSERT(E->epoch != (unsigned)-1);
1239 
1240 	/* Notify waiters.  */
1241 	if (E->stage >= ENTROPY_WARM) {
1242 		cv_broadcast(&E->cv);
1243 		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
1244 	}
1245 
1246 	/* Count another notification.  */
1247 	entropy_notify_evcnt.ev_count++;
1248 }
1249 
1250 /*
1251  * entropy_consolidate()
1252  *
1253  *	Trigger entropy consolidation and wait for it to complete.
1254  *
1255  *	This should be used sparingly, not periodically -- requiring
1256  *	conscious intervention by the operator or a clear policy
1257  *	decision.  Otherwise, the kernel will automatically consolidate
1258  *	when enough entropy has been gathered into per-CPU pools to
1259  *	transition to full entropy.
1260  */
1261 void
1262 entropy_consolidate(void)
1263 {
1264 	uint64_t ticket;
1265 	int error;
1266 
1267 	KASSERT(E->stage == ENTROPY_HOT);
1268 
1269 	mutex_enter(&E->lock);
1270 	ticket = entropy_consolidate_evcnt.ev_count;
1271 	E->consolidate = true;
1272 	cv_broadcast(&E->cv);
1273 	while (ticket == entropy_consolidate_evcnt.ev_count) {
1274 		error = cv_wait_sig(&E->cv, &E->lock);
1275 		if (error)
1276 			break;
1277 	}
1278 	mutex_exit(&E->lock);
1279 }
1280 
1281 /*
1282  * sysctl -w kern.entropy.consolidate=1
1283  *
1284  *	Trigger entropy consolidation and wait for it to complete.
1285  *	Writable only by superuser.  This, writing to /dev/random, and
1286  *	ioctl(RNDADDDATA) are the only ways for the system to
1287  *	consolidate entropy if the operator knows something the kernel
1288  *	doesn't about how unpredictable the pending entropy pools are.
1289  */
1290 static int
1291 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
1292 {
1293 	struct sysctlnode node = *rnode;
1294 	int arg = 0;
1295 	int error;
1296 
1297 	KASSERT(E->stage == ENTROPY_HOT);
1298 
1299 	node.sysctl_data = &arg;
1300 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1301 	if (error || newp == NULL)
1302 		return error;
1303 	if (arg)
1304 		entropy_consolidate();
1305 
1306 	return error;
1307 }
1308 
1309 /*
1310  * sysctl -w kern.entropy.gather=1
1311  *
1312  *	Trigger gathering entropy from all on-demand sources, and wait
1313  *	for synchronous sources (but not asynchronous sources) to
1314  *	complete.  Writable only by superuser.
1315  */
1316 static int
1317 sysctl_entropy_gather(SYSCTLFN_ARGS)
1318 {
1319 	struct sysctlnode node = *rnode;
1320 	int arg = 0;
1321 	int error;
1322 
1323 	KASSERT(E->stage == ENTROPY_HOT);
1324 
1325 	node.sysctl_data = &arg;
1326 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1327 	if (error || newp == NULL)
1328 		return error;
1329 	if (arg) {
1330 		mutex_enter(&E->lock);
1331 		error = entropy_request(ENTROPY_CAPACITY,
1332 		    ENTROPY_WAIT|ENTROPY_SIG);
1333 		mutex_exit(&E->lock);
1334 	}
1335 
1336 	return error;
1337 }
1338 
1339 /*
1340  * entropy_extract(buf, len, flags)
1341  *
1342  *	Extract len bytes from the global entropy pool into buf.
1343  *
1344  *	Caller MUST NOT expose these bytes directly -- must use them
1345  *	ONLY to seed a cryptographic pseudorandom number generator
1346  *	(`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'),
1347  *	and then erase them.  entropy_extract does not, on its own,
1348  *	provide backtracking resistance -- it must be combined with a
1349  *	PRNG/DRBG that does.
1350  *
1351  *	You generally shouldn't use this directly -- use cprng(9)
1352  *	instead.
1353  *
1354  *	Flags may have:
1355  *
1356  *		ENTROPY_WAIT	Wait for entropy if not available yet.
1357  *		ENTROPY_SIG	Allow interruption by a signal during wait.
1358  *		ENTROPY_HARDFAIL Either fill the buffer with full entropy,
1359  *				or fail without filling it at all.
1360  *
1361  *	Return zero on success, or error on failure:
1362  *
1363  *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
1364  *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
1365  *
1366  *	If ENTROPY_WAIT is set, allowed only in thread context.  If
1367  *	ENTROPY_WAIT is not set, allowed also in softint context.
1368  *	Forbidden in hard interrupt context.
1369  */
1370 int
1371 entropy_extract(void *buf, size_t len, int flags)
1372 {
1373 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1374 	static struct timeval lasttime; /* serialized by E->lock */
1375 	int error;
1376 
1377 	if (ISSET(flags, ENTROPY_WAIT)) {
1378 		ASSERT_SLEEPABLE();
1379 		KASSERTMSG(E->stage >= ENTROPY_WARM,
1380 		    "can't wait for entropy until warm");
1381 	}
1382 
1383 	/* Refuse to operate in interrupt context.  */
1384 	KASSERT(!cpu_intr_p());
1385 
1386 	/* Acquire the global lock to get at the global pool.  */
1387 	if (E->stage >= ENTROPY_WARM)
1388 		mutex_enter(&E->lock);
1389 
1390 	/* Wait until there is enough entropy in the system.  */
1391 	error = 0;
1392 	while (E->needed) {
1393 		/* Ask for more, synchronously if possible.  */
1394 		error = entropy_request(len, flags);
1395 		if (error)
1396 			break;
1397 
1398 		/* If we got enough, we're done.  */
1399 		if (E->needed == 0) {
1400 			KASSERT(error == 0);
1401 			break;
1402 		}
1403 
1404 		/* If not waiting, stop here.  */
1405 		if (!ISSET(flags, ENTROPY_WAIT)) {
1406 			error = EWOULDBLOCK;
1407 			break;
1408 		}
1409 
1410 		/* Wait for some entropy to come in and try again.  */
1411 		KASSERT(E->stage >= ENTROPY_WARM);
1412 		printf("entropy: pid %d (%s) blocking due to lack of entropy\n",
1413 		       curproc->p_pid, curproc->p_comm);
1414 
1415 		if (ISSET(flags, ENTROPY_SIG)) {
1416 			error = cv_wait_sig(&E->cv, &E->lock);
1417 			if (error)
1418 				break;
1419 		} else {
1420 			cv_wait(&E->cv, &E->lock);
1421 		}
1422 	}
1423 
1424 	/*
1425 	 * Count failure -- but fill the buffer nevertheless, unless
1426 	 * the caller specified ENTROPY_HARDFAIL.
1427 	 */
1428 	if (error) {
1429 		if (ISSET(flags, ENTROPY_HARDFAIL))
1430 			goto out;
1431 		entropy_extract_fail_evcnt.ev_count++;
1432 	}
1433 
1434 	/*
1435 	 * Report a warning if we have never yet reached full entropy.
1436 	 * This is the only case where we consider entropy to be
1437 	 * `depleted' without kern.entropy.depletion enabled -- when we
1438 	 * only have partial entropy, an adversary may be able to
1439 	 * narrow the state of the pool down to a small number of
1440 	 * possibilities; the output then enables them to confirm a
1441 	 * guess, reducing its entropy from the adversary's perspective
1442 	 * to zero.
1443 	 */
1444 	if (__predict_false(E->epoch == (unsigned)-1)) {
1445 		if (ratecheck(&lasttime, &interval))
1446 			printf("WARNING:"
1447 			    " system needs entropy for security;"
1448 			    " see entropy(7)\n");
1449 		atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY);
1450 	}
1451 
1452 	/* Extract data from the pool, and `deplete' if we're doing that.  */
1453 	entpool_extract(&E->pool, buf, len);
1454 	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
1455 	    error == 0) {
1456 		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
1457 
1458 		atomic_store_relaxed(&E->needed,
1459 		    E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost));
1460 		entropy_deplete_evcnt.ev_count++;
1461 	}
1462 
1463 out:	/* Release the global lock and return the error.  */
1464 	if (E->stage >= ENTROPY_WARM)
1465 		mutex_exit(&E->lock);
1466 	return error;
1467 }
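
/*
 * Example (an illustrative sketch, not additional API): seeding a
 * caller-managed PRNG, willing to sleep and to be interrupted by a
 * signal.  crypto_prng_reseed and foo are the same hypothetical
 * consumer as in the entropy_epoch usage model above:
 *
 *	uint8_t seed[32];
 *	int error;
 *
 *	error = entropy_extract(seed, sizeof seed,
 *	    ENTROPY_WAIT|ENTROPY_SIG);
 *	if (error)
 *		return error;
 *	crypto_prng_reseed(&foo->prng, seed, sizeof seed);
 *	explicit_memset(seed, 0, sizeof seed);
 */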
1468 
1469 /*
1470  * entropy_poll(events)
1471  *
1472  *	Return the subset of events ready, and if it is not all of
1473  *	events, record curlwp as waiting for entropy.
1474  */
1475 int
1476 entropy_poll(int events)
1477 {
1478 	int revents = 0;
1479 
1480 	KASSERT(E->stage >= ENTROPY_WARM);
1481 
1482 	/* Always ready for writing.  */
1483 	revents |= events & (POLLOUT|POLLWRNORM);
1484 
1485 	/* Narrow it down to reads.  */
1486 	events &= POLLIN|POLLRDNORM;
1487 	if (events == 0)
1488 		return revents;
1489 
1490 	/*
1491 	 * If we have reached full entropy and we're not depleting
1492 	 * entropy, we are forever ready.
1493 	 */
1494 	if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
1495 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
1496 		return revents | events;
1497 
1498 	/*
1499 	 * Otherwise, check whether we need entropy under the lock.  If
1500 	 * we don't, we're ready; if we do, add ourselves to the queue.
1501 	 */
1502 	mutex_enter(&E->lock);
1503 	if (E->needed == 0)
1504 		revents |= events;
1505 	else
1506 		selrecord(curlwp, &E->selq);
1507 	mutex_exit(&E->lock);
1508 
1509 	return revents;
1510 }
1511 
1512 /*
1513  * filt_entropy_read_detach(kn)
1514  *
1515  *	struct filterops::f_detach callback for entropy read events:
1516  *	remove kn from the list of waiters.
1517  */
1518 static void
1519 filt_entropy_read_detach(struct knote *kn)
1520 {
1521 
1522 	KASSERT(E->stage >= ENTROPY_WARM);
1523 
1524 	mutex_enter(&E->lock);
1525 	selremove_knote(&E->selq, kn);
1526 	mutex_exit(&E->lock);
1527 }
1528 
1529 /*
1530  * filt_entropy_read_event(kn, hint)
1531  *
1532  *	struct filterops::f_event callback for entropy read events:
1533  *	poll for entropy.  Caller must hold the global entropy lock if
1534  *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
1535  */
1536 static int
1537 filt_entropy_read_event(struct knote *kn, long hint)
1538 {
1539 	int ret;
1540 
1541 	KASSERT(E->stage >= ENTROPY_WARM);
1542 
1543 	/* Acquire the lock, if caller is outside entropy subsystem.  */
1544 	if (hint == NOTE_SUBMIT)
1545 		KASSERT(mutex_owned(&E->lock));
1546 	else
1547 		mutex_enter(&E->lock);
1548 
1549 	/*
1550 	 * If we still need entropy, can't read anything; if not, can
1551 	 * read arbitrarily much.
1552 	 */
1553 	if (E->needed != 0) {
1554 		ret = 0;
1555 	} else {
1556 		if (atomic_load_relaxed(&entropy_depletion))
1557 			kn->kn_data = ENTROPY_CAPACITY; /* bytes */
1558 		else
1559 			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
1560 		ret = 1;
1561 	}
1562 
1563 	/* Release the lock, if caller is outside entropy subsystem.  */
1564 	if (hint == NOTE_SUBMIT)
1565 		KASSERT(mutex_owned(&E->lock));
1566 	else
1567 		mutex_exit(&E->lock);
1568 
1569 	return ret;
1570 }
1571 
1572 /* XXX Makes sense only for /dev/u?random.  */
1573 static const struct filterops entropy_read_filtops = {
1574 	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
1575 	.f_attach = NULL,
1576 	.f_detach = filt_entropy_read_detach,
1577 	.f_event = filt_entropy_read_event,
1578 };
1579 
1580 /*
1581  * entropy_kqfilter(kn)
1582  *
1583  *	Register kn to receive entropy event notifications.  May be
1584  *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
1585  */
1586 int
1587 entropy_kqfilter(struct knote *kn)
1588 {
1589 
1590 	KASSERT(E->stage >= ENTROPY_WARM);
1591 
1592 	switch (kn->kn_filter) {
1593 	case EVFILT_READ:
1594 		/* Enter into the global select queue.  */
1595 		mutex_enter(&E->lock);
1596 		kn->kn_fop = &entropy_read_filtops;
1597 		selrecord_knote(&E->selq, kn);
1598 		mutex_exit(&E->lock);
1599 		return 0;
1600 	case EVFILT_WRITE:
1601 		/* Can always dump entropy into the system.  */
1602 		kn->kn_fop = &seltrue_filtops;
1603 		return 0;
1604 	default:
1605 		return EINVAL;
1606 	}
1607 }
1608 
1609 /*
1610  * rndsource_setcb(rs, get, getarg)
1611  *
1612  *	Set the request callback for the entropy source rs, if it can
1613  *	provide entropy on demand.  Must precede rnd_attach_source.
1614  */
1615 void
1616 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
1617     void *getarg)
1618 {
1619 
1620 	rs->get = get;
1621 	rs->getarg = getarg;
1622 }
1623 
1624 /*
1625  * rnd_attach_source(rs, name, type, flags)
1626  *
1627  *	Attach the entropy source rs.  Must be done after
1628  *	rndsource_setcb, if any, and before any calls to rnd_add_data.
1629  */
1630 void
1631 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
1632     uint32_t flags)
1633 {
1634 	uint32_t extra[4];
1635 	unsigned i = 0;
1636 
1637 	KASSERTMSG(name[0] != '\0', "rndsource must have nonempty name");
1638 
1639 	/* Grab cycle counter to mix extra into the pool.  */
1640 	extra[i++] = entropy_timer();
1641 
1642 	/*
1643 	 * Apply some standard flags:
1644 	 *
1645 	 * - We do not bother with network devices by default, for
1646 	 *   hysterical raisins (perhaps: because it is often the case
1647 	 *   that an adversary can influence network packet timings).
1648 	 */
1649 	switch (type) {
1650 	case RND_TYPE_NET:
1651 		flags |= RND_FLAG_NO_COLLECT;
1652 		break;
1653 	}
1654 
1655 	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
1656 	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
1657 
1658 	/* Initialize the random source.  */
1659 	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
1660 	strlcpy(rs->name, name, sizeof(rs->name));
1661 	memset(&rs->time_delta, 0, sizeof(rs->time_delta));
1662 	memset(&rs->value_delta, 0, sizeof(rs->value_delta));
1663 	rs->total = 0;
1664 	rs->type = type;
1665 	rs->flags = flags;
1666 	if (E->stage >= ENTROPY_WARM)
1667 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
1668 	extra[i++] = entropy_timer();
1669 
1670 	/* Wire it into the global list of random sources.  */
1671 	if (E->stage >= ENTROPY_WARM)
1672 		mutex_enter(&E->lock);
1673 	LIST_INSERT_HEAD(&E->sources, rs, list);
1674 	if (E->stage >= ENTROPY_WARM)
1675 		mutex_exit(&E->lock);
1676 	extra[i++] = entropy_timer();
1677 
1678 	/* Request that it provide entropy ASAP, if we can.  */
1679 	if (ISSET(flags, RND_FLAG_HASCB))
1680 		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
1681 	extra[i++] = entropy_timer();
1682 
1683 	/* Mix the extra into the pool.  */
1684 	KASSERT(i == __arraycount(extra));
1685 	entropy_enter(extra, sizeof extra, 0);
1686 	explicit_memset(extra, 0, sizeof extra);
1687 }
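
/*
 * Example (illustrative only; all hypo_* names are hypothetical): a
 * driver for a hardware RNG, whose output the author has studied
 * and believes fully unpredictable, might wire up an on-demand
 * callback, calling rndsource_setcb before rnd_attach_source as
 * required above:
 *
 *	static void
 *	hypo_get(size_t nbytes, void *arg)
 *	{
 *		struct hypo_softc *sc = arg;
 *		uint32_t v;
 *
 *		while (nbytes) {
 *			v = hypo_read_hwrng(sc);
 *			rnd_add_data(&sc->sc_rndsource, &v, sizeof v,
 *			    NBBY*sizeof v);
 *			nbytes -= MIN(nbytes, sizeof v);
 *		}
 *	}
 *
 *	rndsource_setcb(&sc->sc_rndsource, hypo_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, "hypo0", RND_TYPE_RNG,
 *	    RND_FLAG_DEFAULT|RND_FLAG_HASCB);
 */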
1688 
1689 /*
1690  * rnd_detach_source(rs)
1691  *
1692  *	Detach the entropy source rs.  May sleep waiting for users to
1693  *	drain.  Further use is not allowed.
1694  */
1695 void
1696 rnd_detach_source(struct krndsource *rs)
1697 {
1698 
1699 	/*
1700 	 * If we're cold (shouldn't happen, but hey), just remove it
1701 	 * from the list -- there's nothing allocated.
1702 	 */
1703 	if (E->stage == ENTROPY_COLD) {
1704 		LIST_REMOVE(rs, list);
1705 		return;
1706 	}
1707 
1708 	/* We may have to wait for entropy_request.  */
1709 	ASSERT_SLEEPABLE();
1710 
1711 	/* Wait until the source list is not in use, and remove it.  */
1712 	mutex_enter(&E->lock);
1713 	while (E->sourcelock)
1714 		cv_wait(&E->sourcelock_cv, &E->lock);
1715 	LIST_REMOVE(rs, list);
1716 	mutex_exit(&E->lock);
1717 
1718 	/* Free the per-CPU data.  */
1719 	percpu_free(rs->state, sizeof(struct rndsource_cpu));
1720 }
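
/*
 * Continuing the hypothetical example above, the matching detach
 * path must run in thread context, since this can sleep:
 *
 *	rnd_detach_source(&sc->sc_rndsource);
 */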
1721 
1722 /*
1723  * rnd_lock_sources(flags)
1724  *
1725  *	Lock the list of entropy sources.  Caller must hold the global
1726  *	entropy lock.  If successful, no rndsource will go away until
1727  *	rnd_unlock_sources even while the caller releases the global
1728  *	entropy lock.
1729  *
1730  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1731  *	If flags & ENTROPY_SIG, allow interruption by signal.
1732  */
1733 static int __attribute__((warn_unused_result))
1734 rnd_lock_sources(int flags)
1735 {
1736 	int error;
1737 
1738 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1739 
1740 	while (E->sourcelock) {
1741 		KASSERT(E->stage >= ENTROPY_WARM);
1742 		if (!ISSET(flags, ENTROPY_WAIT))
1743 			return EWOULDBLOCK;
1744 		if (ISSET(flags, ENTROPY_SIG)) {
1745 			error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
1746 			if (error)
1747 				return error;
1748 		} else {
1749 			cv_wait(&E->sourcelock_cv, &E->lock);
1750 		}
1751 	}
1752 
1753 	E->sourcelock = curlwp;
1754 	return 0;
1755 }
1756 
1757 /*
1758  * rnd_unlock_sources()
1759  *
1760  *	Unlock the list of sources after rnd_lock_sources.  Caller must
1761  *	hold the global entropy lock.
1762  */
1763 static void
1764 rnd_unlock_sources(void)
1765 {
1766 
1767 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1768 
1769 	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
1770 	    curlwp, E->sourcelock);
1771 	E->sourcelock = NULL;
1772 	if (E->stage >= ENTROPY_WARM)
1773 		cv_signal(&E->sourcelock_cv);
1774 }
1775 
1776 /*
1777  * rnd_sources_locked()
1778  *
1779  *	True if we hold the list of rndsources locked, for diagnostic
1780  *	assertions.
1781  */
1782 static bool __diagused
1783 rnd_sources_locked(void)
1784 {
1785 
1786 	return E->sourcelock == curlwp;
1787 }
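
/*
 * The canonical pattern appears in entropy_request below: take
 * E->lock, call rnd_lock_sources, release E->lock around the
 * driver callbacks, re-take it, and call rnd_unlock_sources when
 * done.
 */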
1788 
1789 /*
1790  * entropy_request(nbytes, flags)
1791  *
1792  *	Request nbytes bytes of entropy from all sources in the system.
1793  *	OK if we overdo it.  Caller must hold the global entropy lock;
1794  *	will release and re-acquire it.
1795  *
1796  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1797  *	If flags & ENTROPY_SIG, allow interruption by signal.
1798  */
1799 static int
1800 entropy_request(size_t nbytes, int flags)
1801 {
1802 	struct krndsource *rs;
1803 	int error;
1804 
1805 	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
1806 	if (flags & ENTROPY_WAIT)
1807 		ASSERT_SLEEPABLE();
1808 
1809 	/*
1810 	 * Lock the list of entropy sources to block rnd_detach_source
1811 	 * until we're done, and to serialize calls to the entropy
1812 	 * callbacks as guaranteed to drivers.
1813 	 */
1814 	error = rnd_lock_sources(flags);
1815 	if (error)
1816 		return error;
1817 	entropy_request_evcnt.ev_count++;
1818 
1819 	/* Clamp to the maximum reasonable request.  */
1820 	nbytes = MIN(nbytes, ENTROPY_CAPACITY);
1821 
1822 	/* Walk the list of sources.  */
1823 	LIST_FOREACH(rs, &E->sources, list) {
1824 		/* Skip sources without callbacks.  */
1825 		if (!ISSET(rs->flags, RND_FLAG_HASCB))
1826 			continue;
1827 
1828 		/*
1829 		 * Skip sources that are disabled altogether -- we
1830 		 * would just ignore their samples anyway.
1831 		 */
1832 		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
1833 			continue;
1834 
1835 		/* Drop the lock while we call the callback.  */
1836 		if (E->stage >= ENTROPY_WARM)
1837 			mutex_exit(&E->lock);
1838 		(*rs->get)(nbytes, rs->getarg);
1839 		if (E->stage >= ENTROPY_WARM)
1840 			mutex_enter(&E->lock);
1841 	}
1842 
1843 	/* Request done; unlock the list of entropy sources.  */
1844 	rnd_unlock_sources();
1845 	return 0;
1846 }
1847 
1848 /*
1849  * rnd_add_uint32(rs, value)
1850  *
1851  *	Enter 32 bits of data from an entropy source into the pool.
1852  *
1853  *	If rs is NULL, may not be called from interrupt context.
1854  *
1855  *	If rs is non-NULL, may be called from any context.  May drop
1856  *	data if called from interrupt context.
1857  */
1858 void
1859 rnd_add_uint32(struct krndsource *rs, uint32_t value)
1860 {
1861 
1862 	rnd_add_data(rs, &value, sizeof value, 0);
1863 }
1864 
1865 void
1866 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
1867 {
1868 
1869 	rnd_add_data(rs, &value, sizeof value, 0);
1870 }
1871 
1872 void
1873 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
1874 {
1875 
1876 	rnd_add_data(rs, &value, sizeof value, 0);
1877 }
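
/*
 * Illustrative use (hypothetical driver): an interrupt handler can
 * cheaply stir in a status word and the implicit timestamp taken by
 * rnd_add_data.  No entropy is counted (entropybits = 0), and under
 * interrupt load the sample may be dropped, as noted above.
 *
 *	static int
 *	hypo_intr(void *arg)
 *	{
 *		struct hypo_softc *sc = arg;
 *
 *		rnd_add_uint32(&sc->sc_rndsource, hypo_read_status(sc));
 *		...
 *		return 1;
 *	}
 */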
1878 
1879 /*
1880  * rnd_add_data(rs, buf, len, entropybits)
1881  *
1882  *	Enter data from an entropy source into the pool, with a
1883  *	driver's estimate of how much entropy the physical source of
1884  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
1885  *	estimate and treat it as zero.
1886  *
1887  *	If rs is NULL, may not be called from interrupt context.
1888  *
1889  *	If rs is non-NULL, may be called from any context.  May drop
1890  *	data if called from interrupt context.
1891  */
1892 void
1893 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
1894     uint32_t entropybits)
1895 {
1896 	uint32_t extra;
1897 	uint32_t flags;
1898 
1899 	KASSERTMSG(howmany(entropybits, NBBY) <= len,
1900 	    "%s: impossible entropy rate:"
1901 	    " %"PRIu32" bits in %"PRIu32"-byte string",
1902 	    rs ? rs->name : "(anonymous)", entropybits, len);
1903 
1904 	/* If there's no rndsource, just enter the data and time now.  */
1905 	if (rs == NULL) {
1906 		entropy_enter(buf, len, entropybits);
1907 		extra = entropy_timer();
1908 		entropy_enter(&extra, sizeof extra, 0);
1909 		explicit_memset(&extra, 0, sizeof extra);
1910 		return;
1911 	}
1912 
1913 	/*
1914 	 * Hold up the reset xcall before it zeroes the entropy counts
1915 	 * on this CPU or globally.  Otherwise, we might leave some
1916 	 * nonzero entropy attributed to an untrusted source in the
1917 	 * event of a race with a change to flags.
1918 	 */
1919 	kpreempt_disable();
1920 
1921 	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
1922 	flags = atomic_load_relaxed(&rs->flags);
1923 
1924 	/*
1925 	 * Skip if:
1926 	 * - we're not collecting entropy, or
1927 	 * - the operator doesn't want to collect entropy from this, or
1928 	 * - neither data nor timings are being collected from this.
1929 	 */
1930 	if (!atomic_load_relaxed(&entropy_collection) ||
1931 	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
1932 	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
1933 		goto out;
1934 
1935 	/* If asked, ignore the estimate.  */
1936 	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
1937 		entropybits = 0;
1938 
1939 	/* If we are collecting data, enter them.  */
1940 	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
1941 		rnd_add_data_1(rs, buf, len, entropybits,
1942 		    RND_FLAG_COLLECT_VALUE);
1943 
1944 	/* If we are collecting timings, enter one.  */
1945 	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
1946 		extra = entropy_timer();
1947 		rnd_add_data_1(rs, &extra, sizeof extra, 0,
1948 		    RND_FLAG_COLLECT_TIME);
1949 	}
1950 
1951 out:	/* Allow concurrent changes to flags to finish.  */
1952 	kpreempt_enable();
1953 }
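
/*
 * A minimal sketch of a data-bearing call (hypothetical names):
 *
 *	uint8_t buf[16];
 *
 *	hypo_read_bytes(sc, buf, sizeof buf);
 *	rnd_add_data(&sc->sc_rndsource, buf, sizeof buf,
 *	    NBBY*sizeof buf);
 *
 * Passing NBBY*sizeof buf asserts every bit is unpredictable; a
 * driver with no defensible estimate should pass 0 and let the
 * operator decide.
 */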
1954 
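/*
 * add_sat(a, b)
 *
 *	Return a + b, saturating at UINT_MAX instead of wrapping on
 *	overflow.
 */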
1955 static unsigned
1956 add_sat(unsigned a, unsigned b)
1957 {
1958 	unsigned c = a + b;
1959 
1960 	return (c < a ? UINT_MAX : c);
1961 }
1962 
1963 /*
1964  * rnd_add_data_1(rs, buf, len, entropybits, flag)
1965  *
1966  *	Internal subroutine to call either entropy_enter_intr, if we're
1967  *	in interrupt context, or entropy_enter if not, and to count the
1968  *	entropy in an rndsource.
1969  */
1970 static void
1971 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
1972     uint32_t entropybits, uint32_t flag)
1973 {
1974 	bool fullyused;
1975 
1976 	/*
1977 	 * If we're in interrupt context, use entropy_enter_intr and
1978 	 * take note of whether it consumed the full sample; if not,
1979 	 * use entropy_enter, which always consumes the full sample.
1980 	 */
1981 	if (curlwp && cpu_intr_p()) {
1982 		fullyused = entropy_enter_intr(buf, len, entropybits);
1983 	} else {
1984 		entropy_enter(buf, len, entropybits);
1985 		fullyused = true;
1986 	}
1987 
1988 	/*
1989 	 * If we used the full sample, note how many bits were
1990 	 * contributed from this source.
1991 	 */
1992 	if (fullyused) {
1993 		if (__predict_false(E->stage == ENTROPY_COLD)) {
1994 			rs->total = add_sat(rs->total, entropybits);
1995 			switch (flag) {
1996 			case RND_FLAG_COLLECT_TIME:
1997 				rs->time_delta.insamples =
1998 				    add_sat(rs->time_delta.insamples, 1);
1999 				break;
2000 			case RND_FLAG_COLLECT_VALUE:
2001 				rs->value_delta.insamples =
2002 				    add_sat(rs->value_delta.insamples, 1);
2003 				break;
2004 			}
2005 		} else {
2006 			struct rndsource_cpu *rc = percpu_getref(rs->state);
2007 
2008 			atomic_store_relaxed(&rc->rc_entropybits,
2009 			    add_sat(rc->rc_entropybits, entropybits));
2010 			switch (flag) {
2011 			case RND_FLAG_COLLECT_TIME:
2012 				atomic_store_relaxed(&rc->rc_timesamples,
2013 				    add_sat(rc->rc_timesamples, 1));
2014 				break;
2015 			case RND_FLAG_COLLECT_VALUE:
2016 				atomic_store_relaxed(&rc->rc_datasamples,
2017 				    add_sat(rc->rc_datasamples, 1));
2018 				break;
2019 			}
2020 			percpu_putref(rs->state);
2021 		}
2022 	}
2023 }
2024 
2025 /*
2026  * rnd_add_data_sync(rs, buf, len, entropybits)
2027  *
2028  *	Same as rnd_add_data.  Originally used in rndsource callbacks,
2029  *	to break an unnecessary cycle; no longer really needed.
2030  */
2031 void
2032 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
2033     uint32_t entropybits)
2034 {
2035 
2036 	rnd_add_data(rs, buf, len, entropybits);
2037 }
2038 
2039 /*
2040  * rndsource_entropybits(rs)
2041  *
2042  *	Return approximately the number of bits of entropy that have
2043  *	been contributed via rs so far.  Approximate if other CPUs may
2044  *	be calling rnd_add_data concurrently.
2045  */
2046 static unsigned
2047 rndsource_entropybits(struct krndsource *rs)
2048 {
2049 	unsigned nbits = rs->total;
2050 
2051 	KASSERT(E->stage >= ENTROPY_WARM);
2052 	KASSERT(rnd_sources_locked());
2053 	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
2054 	return nbits;
2055 }
2056 
2057 static void
2058 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2059 {
2060 	struct rndsource_cpu *rc = ptr;
2061 	unsigned *nbitsp = cookie;
2062 	unsigned cpu_nbits;
2063 
2064 	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
2065 	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
2066 }
2067 
2068 /*
2069  * rndsource_to_user(rs, urs)
2070  *
2071  *	Copy a description of rs out to urs for userland.
2072  */
2073 static void
2074 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
2075 {
2076 
2077 	KASSERT(E->stage >= ENTROPY_WARM);
2078 	KASSERT(rnd_sources_locked());
2079 
2080 	/* Avoid kernel memory disclosure.  */
2081 	memset(urs, 0, sizeof(*urs));
2082 
2083 	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
2084 	strlcpy(urs->name, rs->name, sizeof(urs->name));
2085 	urs->total = rndsource_entropybits(rs);
2086 	urs->type = rs->type;
2087 	urs->flags = atomic_load_relaxed(&rs->flags);
2088 }
2089 
2090 /*
2091  * rndsource_to_user_est(rs, urse)
2092  *
2093  *	Copy a description of rs and estimation statistics out to urse
2094  *	for userland.
2095  */
2096 static void
2097 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
2098 {
2099 
2100 	KASSERT(E->stage >= ENTROPY_WARM);
2101 	KASSERT(rnd_sources_locked());
2102 
2103 	/* Avoid kernel memory disclosure.  */
2104 	memset(urse, 0, sizeof(*urse));
2105 
2106 	/* Copy out the rndsource description.  */
2107 	rndsource_to_user(rs, &urse->rt);
2108 
2109 	/* Gather the statistics.  */
2110 	urse->dt_samples = rs->time_delta.insamples;
2111 	urse->dt_total = 0;
2112 	urse->dv_samples = rs->value_delta.insamples;
2113 	urse->dv_total = urse->rt.total;
2114 	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
2115 }
2116 
2117 static void
2118 rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2119 {
2120 	struct rndsource_cpu *rc = ptr;
2121 	rndsource_est_t *urse = cookie;
2122 
2123 	urse->dt_samples = add_sat(urse->dt_samples,
2124 	    atomic_load_relaxed(&rc->rc_timesamples));
2125 	urse->dv_samples = add_sat(urse->dv_samples,
2126 	    atomic_load_relaxed(&rc->rc_datasamples));
2127 }
2128 
2129 /*
2130  * entropy_reset_xc(arg1, arg2)
2131  *
2132  *	Reset the current CPU's pending entropy to zero.
2133  */
2134 static void
2135 entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
2136 {
2137 	uint32_t extra = entropy_timer();
2138 	struct entropy_cpu_lock lock;
2139 	struct entropy_cpu *ec;
2140 
2141 	/*
2142 	 * With the per-CPU state locked, zero the pending count and
2143 	 * enter a cycle count for fun.
2144 	 */
2145 	ec = entropy_cpu_get(&lock);
2146 	ec->ec_pending = 0;
2147 	entpool_enter(ec->ec_pool, &extra, sizeof extra);
2148 	entropy_cpu_put(&lock, ec);
2149 }
2150 
2151 /*
2152  * entropy_ioctl(cmd, data)
2153  *
2154  *	Handle various /dev/random ioctl queries.
2155  */
2156 int
2157 entropy_ioctl(unsigned long cmd, void *data)
2158 {
2159 	struct krndsource *rs;
2160 	bool privileged = false;
2161 	int error;
2162 
2163 	KASSERT(E->stage >= ENTROPY_WARM);
2164 
2165 	/* Verify user's authorization to perform the ioctl.  */
2166 	switch (cmd) {
2167 	case RNDGETENTCNT:
2168 	case RNDGETPOOLSTAT:
2169 	case RNDGETSRCNUM:
2170 	case RNDGETSRCNAME:
2171 	case RNDGETESTNUM:
2172 	case RNDGETESTNAME:
2173 		error = kauth_authorize_device(kauth_cred_get(),
2174 		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
2175 		break;
2176 	case RNDCTL:
2177 		error = kauth_authorize_device(kauth_cred_get(),
2178 		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
2179 		break;
2180 	case RNDADDDATA:
2181 		error = kauth_authorize_device(kauth_cred_get(),
2182 		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
2183 		/* Ascertain whether the user's inputs should be counted.  */
2184 		if (kauth_authorize_device(kauth_cred_get(),
2185 			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
2186 			NULL, NULL, NULL, NULL) == 0)
2187 			privileged = true;
2188 		break;
2189 	default: {
2190 		/*
2191 		 * XXX Hack to avoid changing module ABI so this can be
2192 		 * pulled up.  Later, we can just remove the argument.
2193 		 */
2194 		static const struct fileops fops = {
2195 			.fo_ioctl = rnd_system_ioctl,
2196 		};
2197 		struct file f = {
2198 			.f_ops = &fops,
2199 		};
2200 		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
2201 		    enosys(), error);
2202 #if defined(_LP64)
2203 		if (error == ENOSYS)
2204 			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
2205 			    enosys(), error);
2206 #endif
2207 		if (error == ENOSYS)
2208 			error = ENOTTY;
2209 		break;
2210 	}
2211 	}
2212 
2213 	/* If anything went wrong with authorization, stop here.  */
2214 	if (error)
2215 		return error;
2216 
2217 	/* Dispatch on the command.  */
2218 	switch (cmd) {
2219 	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
2220 		uint32_t *countp = data;
2221 
2222 		mutex_enter(&E->lock);
2223 		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
2224 		mutex_exit(&E->lock);
2225 
2226 		break;
2227 	}
2228 	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
2229 		rndpoolstat_t *pstat = data;
2230 
2231 		mutex_enter(&E->lock);
2232 
2233 		/* parameters */
2234 		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
2235 		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
2236 		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
2237 
2238 		/* state */
2239 		pstat->added = 0; /* XXX total entropy_enter count */
2240 		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
2241 		pstat->removed = 0; /* XXX total entropy_extract count */
2242 		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
2243 		pstat->generated = 0; /* XXX bits of data...fabricated? */
2244 
2245 		mutex_exit(&E->lock);
2246 		break;
2247 	}
2248 	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
2249 		rndstat_t *stat = data;
2250 		uint32_t start = 0, i = 0;
2251 
2252 		/* Skip if none requested; fail if too many requested.  */
2253 		if (stat->count == 0)
2254 			break;
2255 		if (stat->count > RND_MAXSTATCOUNT)
2256 			return EINVAL;
2257 
2258 		/*
2259 		 * Under the lock, find the first one, copy out as many
2260 		 * as requested, and report how many we copied out.
2261 		 */
2262 		mutex_enter(&E->lock);
2263 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2264 		if (error) {
2265 			mutex_exit(&E->lock);
2266 			return error;
2267 		}
2268 		LIST_FOREACH(rs, &E->sources, list) {
2269 			if (start++ == stat->start)
2270 				break;
2271 		}
2272 		while (i < stat->count && rs != NULL) {
2273 			mutex_exit(&E->lock);
2274 			rndsource_to_user(rs, &stat->source[i++]);
2275 			mutex_enter(&E->lock);
2276 			rs = LIST_NEXT(rs, list);
2277 		}
2278 		KASSERT(i <= stat->count);
2279 		stat->count = i;
2280 		rnd_unlock_sources();
2281 		mutex_exit(&E->lock);
2282 		break;
2283 	}
2284 	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
2285 		rndstat_est_t *estat = data;
2286 		uint32_t start = 0, i = 0;
2287 
2288 		/* Skip if none requested; fail if too many requested.  */
2289 		if (estat->count == 0)
2290 			break;
2291 		if (estat->count > RND_MAXSTATCOUNT)
2292 			return EINVAL;
2293 
2294 		/*
2295 		 * Under the lock, find the first one, copy out as many
2296 		 * as requested, and report how many we copied out.
2297 		 */
2298 		mutex_enter(&E->lock);
2299 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2300 		if (error) {
2301 			mutex_exit(&E->lock);
2302 			return error;
2303 		}
2304 		LIST_FOREACH(rs, &E->sources, list) {
2305 			if (start++ == estat->start)
2306 				break;
2307 		}
2308 		while (i < estat->count && rs != NULL) {
2309 			mutex_exit(&E->lock);
2310 			rndsource_to_user_est(rs, &estat->source[i++]);
2311 			mutex_enter(&E->lock);
2312 			rs = LIST_NEXT(rs, list);
2313 		}
2314 		KASSERT(i <= estat->count);
2315 		estat->count = i;
2316 		rnd_unlock_sources();
2317 		mutex_exit(&E->lock);
2318 		break;
2319 	}
2320 	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
2321 		rndstat_name_t *nstat = data;
2322 		const size_t n = sizeof(rs->name);
2323 
2324 		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
2325 
2326 		/*
2327 		 * Under the lock, search by name.  If found, copy it
2328 		 * out; if not found, fail with ENOENT.
2329 		 */
2330 		mutex_enter(&E->lock);
2331 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2332 		if (error) {
2333 			mutex_exit(&E->lock);
2334 			return error;
2335 		}
2336 		LIST_FOREACH(rs, &E->sources, list) {
2337 			if (strncmp(rs->name, nstat->name, n) == 0)
2338 				break;
2339 		}
2340 		if (rs != NULL) {
2341 			mutex_exit(&E->lock);
2342 			rndsource_to_user(rs, &nstat->source);
2343 			mutex_enter(&E->lock);
2344 		} else {
2345 			error = ENOENT;
2346 		}
2347 		rnd_unlock_sources();
2348 		mutex_exit(&E->lock);
2349 		break;
2350 	}
2351 	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
2352 		rndstat_est_name_t *enstat = data;
2353 		const size_t n = sizeof(rs->name);
2354 
2355 		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
2356 
2357 		/*
2358 		 * Under the lock, search by name.  If found, copy it
2359 		 * out; if not found, fail with ENOENT.
2360 		 */
2361 		mutex_enter(&E->lock);
2362 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2363 		if (error) {
2364 			mutex_exit(&E->lock);
2365 			return error;
2366 		}
2367 		LIST_FOREACH(rs, &E->sources, list) {
2368 			if (strncmp(rs->name, enstat->name, n) == 0)
2369 				break;
2370 		}
2371 		if (rs != NULL) {
2372 			mutex_exit(&E->lock);
2373 			rndsource_to_user_est(rs, &enstat->source);
2374 			mutex_enter(&E->lock);
2375 		} else {
2376 			error = ENOENT;
2377 		}
2378 		rnd_unlock_sources();
2379 		mutex_exit(&E->lock);
2380 		break;
2381 	}
2382 	case RNDCTL: {		/* Modify entropy source flags.  */
2383 		rndctl_t *rndctl = data;
2384 		const size_t n = sizeof(rs->name);
2385 		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2386 		uint32_t flags;
2387 		bool reset = false, request = false;
2388 
2389 		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
2390 
2391 		/* Whitelist the flags that the user can change.  */
2392 		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2393 
2394 		/*
2395 		 * For each matching rndsource, either by type if
2396 		 * specified or by name if not, set the masked flags.
2397 		 */
2398 		mutex_enter(&E->lock);
2399 		LIST_FOREACH(rs, &E->sources, list) {
2400 			if (rndctl->type != 0xff) {
2401 				if (rs->type != rndctl->type)
2402 					continue;
2403 			} else if (rndctl->name[0] != '\0') {
2404 				if (strncmp(rs->name, rndctl->name, n) != 0)
2405 					continue;
2406 			}
2407 			flags = rs->flags & ~rndctl->mask;
2408 			flags |= rndctl->flags & rndctl->mask;
2409 			if ((rs->flags & resetflags) == 0 &&
2410 			    (flags & resetflags) != 0)
2411 				reset = true;
2412 			if ((rs->flags ^ flags) & resetflags)
2413 				request = true;
2414 			atomic_store_relaxed(&rs->flags, flags);
2415 		}
2416 		mutex_exit(&E->lock);
2417 
2418 		/*
2419 		 * If we disabled estimation or collection, nix all the
2420 		 * pending entropy and set needed to the maximum.
2421 		 */
2422 		if (reset) {
2423 			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
2424 			mutex_enter(&E->lock);
2425 			E->pending = 0;
2426 			atomic_store_relaxed(&E->needed,
2427 			    ENTROPY_CAPACITY*NBBY);
2428 			E->consolidate = false;
2429 			mutex_exit(&E->lock);
2430 		}
2431 
2432 		/*
2433 		 * If we changed any of the estimation or collection
2434 		 * flags, request new samples from everyone -- either
2435 		 * to make up for what we just lost, or to get new
2436 		 * samples from what we just added.
2437 		 *
2438 		 * Failing on signal, while waiting for another process
2439 		 * to finish requesting entropy, is OK here even though
2440 		 * we have committed side effects, because this ioctl
2441 		 * command is idempotent, so repeating it is safe.
2442 		 */
2443 		if (request) {
2444 			mutex_enter(&E->lock);
2445 			error = entropy_request(ENTROPY_CAPACITY,
2446 			    ENTROPY_WAIT|ENTROPY_SIG);
2447 			mutex_exit(&E->lock);
2448 		}
2449 		break;
2450 	}
2451 	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
2452 		rnddata_t *rdata = data;
2453 		unsigned entropybits = 0;
2454 
2455 		if (!atomic_load_relaxed(&entropy_collection))
2456 			break;	/* thanks but no thanks */
2457 		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
2458 			return EINVAL;
2459 
2460 		/*
2461 		 * This ioctl serves as the userland alternative to a
2462 		 * bootloader-provided seed -- typically furnished by
2463 		 * /etc/rc.d/random_seed.  We accept the user's entropy
2464 		 * claim only if
2465 		 *
2466 		 * (a) the user is privileged, and
2467 		 * (b) we have not entered a bootloader seed,
2468 		 *
2469 		 * under the assumption that the user may use this to
2470 		 * load a seed from disk that we have already loaded
2471 		 * from the bootloader, so we don't double-count it.
2472 		 */
2473 		if (privileged && rdata->entropy && rdata->len) {
2474 			mutex_enter(&E->lock);
2475 			if (!E->seeded) {
2476 				entropybits = MIN(rdata->entropy,
2477 				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
2478 				E->seeded = true;
2479 			}
2480 			mutex_exit(&E->lock);
2481 		}
2482 
2483 		/* Enter the data and consolidate entropy.  */
2484 		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
2485 		    entropybits);
2486 		entropy_consolidate();
2487 		break;
2488 	}
2489 	default:
2490 		error = ENOTTY;
2491 	}
2492 
2493 	/* Return any error that may have come up.  */
2494 	return error;
2495 }
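
/*
 * Illustrative userland counterpart (a sketch, not kernel code; the
 * seed acquisition is a stand-in): a privileged process might
 * reseed via RNDADDDATA roughly as follows, with the entropy field
 * counted in bits and error handling omitted:
 *
 *	rnddata_t rd;
 *	int fd;
 *
 *	fd = open("/dev/random", O_RDWR);
 *	memset(&rd, 0, sizeof rd);
 *	rd.len = 32;
 *	rd.entropy = 32*NBBY;
 *	read_seed_from_disk(rd.data, rd.len);
 *	(void)ioctl(fd, RNDADDDATA, &rd);
 *	close(fd);
 */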
2496 
2497 /* Legacy entry points */
2498 
2499 void
2500 rnd_seed(void *seed, size_t len)
2501 {
2502 
2503 	if (len != sizeof(rndsave_t)) {
2504 		printf("entropy: invalid seed length: %zu,"
2505 		    " expected sizeof(rndsave_t) = %zu\n",
2506 		    len, sizeof(rndsave_t));
2507 		return;
2508 	}
2509 	entropy_seed(seed);
2510 }
2511 
2512 void
2513 rnd_init(void)
2514 {
2515 
2516 	entropy_init();
2517 }
2518 
2519 void
2520 rnd_init_softint(void)
2521 {
2522 
2523 	entropy_init_late();
2524 	entropy_bootrequest();
2525 }
2526 
2527 int
2528 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
2529 {
2530 
2531 	return entropy_ioctl(cmd, data);
2532 }
2533