xref: /netbsd-src/sys/kern/kern_entropy.c (revision 63372caa2f74032c7c1cb34e7cd32f28ad65b703)
1 /*	$NetBSD: kern_entropy.c,v 1.72 2024/08/27 00:56:47 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Entropy subsystem
34  *
35  *	* Each CPU maintains a per-CPU entropy pool so that gathering
36  *	  entropy requires no interprocessor synchronization, except
37  *	  early at boot when we may be scrambling to gather entropy as
38  *	  soon as possible.
39  *
40  *	  - entropy_enter gathers entropy and never drops it on the
41  *	    floor, at the cost of sometimes having to do cryptography.
42  *
43  *	  - entropy_enter_intr gathers entropy or drops it on the
44  *	    floor, with low latency.  Work to stir the pool or kick the
45  *	    housekeeping thread is scheduled in soft interrupts.
46  *
47  *	* entropy_enter immediately enters into the global pool if it
48  *	  can transition to full entropy in one swell foop.  Otherwise,
49  *	  it defers to a housekeeping thread that consolidates entropy,
50  *	  but only when the CPUs collectively have full entropy, in
51  *	  order to mitigate iterative-guessing attacks.
52  *
53  *	* The entropy housekeeping thread continues to consolidate
54  *	  entropy even after we think we have full entropy, in case we
55  *	  are wrong, but is limited to one discretionary consolidation
56  *	  per minute, and only when new entropy is actually coming in,
57  *	  to limit performance impact.
58  *
59  *	* The entropy epoch is the number that changes when we
60  *	  transition from partial entropy to full entropy, so that
61  *	  users can easily determine when to reseed.  This also
62  *	  facilitates an operator explicitly causing everything to
63  *	  reseed by sysctl -w kern.entropy.consolidate=1.
64  *
65  *	* Entropy depletion is available for testing (or if you're into
66  *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
67  *	  the logic to support it is small, to minimize chance of bugs.
68  *
69  *	* While cold, a single global entropy pool is available for
70  *	  entering and extracting, serialized through splhigh/splx.
71  *	  The per-CPU entropy pool data structures are initialized in
72  *	  entropy_init and entropy_init_late (separated mainly for
73  *	  hysterical raisins at this point), but are not used until the
74  *	  system is warm, at which point access to the global entropy
75  *	  pool is limited to thread and softint context and serialized
76  *	  by E->lock.
77  */
78 
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.72 2024/08/27 00:56:47 riastradh Exp $");
81 
82 #include <sys/param.h>
83 #include <sys/types.h>
84 #include <sys/atomic.h>
85 #include <sys/compat_stub.h>
86 #include <sys/condvar.h>
87 #include <sys/cpu.h>
88 #include <sys/entropy.h>
89 #include <sys/errno.h>
90 #include <sys/evcnt.h>
91 #include <sys/event.h>
92 #include <sys/file.h>
93 #include <sys/intr.h>
94 #include <sys/kauth.h>
95 #include <sys/kernel.h>
96 #include <sys/kmem.h>
97 #include <sys/kthread.h>
98 #include <sys/lwp.h>
99 #include <sys/module_hook.h>
100 #include <sys/mutex.h>
101 #include <sys/percpu.h>
102 #include <sys/poll.h>
103 #include <sys/proc.h>
104 #include <sys/queue.h>
105 #include <sys/reboot.h>
106 #include <sys/rnd.h>		/* legacy kernel API */
107 #include <sys/rndio.h>		/* userland ioctl interface */
108 #include <sys/rndsource.h>	/* kernel rndsource driver API */
109 #include <sys/select.h>
110 #include <sys/selinfo.h>
111 #include <sys/sha1.h>		/* for boot seed checksum */
112 #include <sys/stdint.h>
113 #include <sys/sysctl.h>
114 #include <sys/syslog.h>
115 #include <sys/systm.h>
116 #include <sys/time.h>
117 #include <sys/xcall.h>
118 
119 #include <lib/libkern/entpool.h>
120 
121 #include <machine/limits.h>
122 
123 #ifdef __HAVE_CPU_COUNTER
124 #include <machine/cpu_counter.h>
125 #endif
126 
127 #define	MINENTROPYBYTES	ENTROPY_CAPACITY
128 #define	MINENTROPYBITS	(MINENTROPYBYTES*NBBY)
129 #define	MINSAMPLES	(2*MINENTROPYBITS)
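
/*
 * For scale: ENTROPY_CAPACITY is normally 32 bytes (see
 * <sys/entropy.h> for the authoritative value), in which case
 * MINENTROPYBYTES = 32, MINENTROPYBITS = 256, and MINSAMPLES = 512.
 */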
130 
131 /*
132  * struct entropy_cpu
133  *
134  *	Per-CPU entropy state.  The pool is allocated separately
135  *	because percpu(9) sometimes moves per-CPU objects around
136  *	without zeroing them, which would lead to unwanted copies of
137  *	sensitive secrets.  The evcnt is allocated separately because
138  *	evcnt(9) assumes it stays put in memory.
139  */
140 struct entropy_cpu {
141 	struct entropy_cpu_evcnt {
142 		struct evcnt		softint;
143 		struct evcnt		intrdrop;
144 		struct evcnt		intrtrunc;
145 	}			*ec_evcnt;
146 	struct entpool		*ec_pool;
147 	unsigned		ec_bitspending;
148 	unsigned		ec_samplespending;
149 	bool			ec_locked;
150 };
151 
152 /*
153  * struct entropy_cpu_lock
154  *
155  *	State for locking the per-CPU entropy state.
156  */
157 struct entropy_cpu_lock {
158 	int		ecl_s;
159 	long		ecl_pctr;
160 };
161 
162 /*
163  * struct rndsource_cpu
164  *
165  *	Per-CPU rndsource state.
166  */
167 struct rndsource_cpu {
168 	unsigned		rc_entropybits;
169 	unsigned		rc_timesamples;
170 	unsigned		rc_datasamples;
171 	rnd_delta_t		rc_timedelta;
172 };
173 
174 /*
175  * entropy_global (a.k.a. E for short in this file)
176  *
177  *	Global entropy state.  Writes protected by the global lock.
178  *	Some fields, marked (A), can be read outside the lock, and are
179  *	maintained with atomic_load/store_relaxed.
180  */
181 struct {
182 	kmutex_t	lock;		/* covers all global state */
183 	struct entpool	pool;		/* global pool for extraction */
184 	unsigned	bitsneeded;	/* (A) needed globally */
185 	unsigned	bitspending;	/* pending in per-CPU pools */
186 	unsigned	samplesneeded;	/* (A) needed globally */
187 	unsigned	samplespending;	/* pending in per-CPU pools */
188 	unsigned	timestamp;	/* (A) time of last consolidation */
189 	unsigned	epoch;		/* (A) changes when needed -> 0 */
190 	kcondvar_t	cv;		/* notifies state changes */
191 	struct selinfo	selq;		/* notifies needed -> 0 */
192 	struct lwp	*sourcelock;	/* lock on list of sources */
193 	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
194 	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
195 	bool		consolidate;	/* kick thread to consolidate */
196 	bool		seed_rndsource;	/* true if seed source is attached */
197 	bool		seeded;		/* true if seed file already loaded */
198 } entropy_global __cacheline_aligned = {
199 	/* Fields that must be initialized when the kernel is loaded.  */
200 	.bitsneeded = MINENTROPYBITS,
201 	.samplesneeded = MINSAMPLES,
202 	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
203 	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
204 };
205 
206 #define	E	(&entropy_global)	/* declutter */
207 
208 /* Read-mostly globals */
209 static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
210 static void		*entropy_sih __read_mostly; /* softint handler */
211 static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */
212 
213 static struct krndsource seed_rndsource __read_mostly;
214 
215 /*
216  * Event counters
217  *
218  *	Must be careful with adding these because they can serve as
219  *	side channels.
220  */
221 static struct evcnt entropy_discretionary_evcnt =
222     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
223 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
224 static struct evcnt entropy_immediate_evcnt =
225     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
226 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
227 static struct evcnt entropy_partial_evcnt =
228     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
229 EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
230 static struct evcnt entropy_consolidate_evcnt =
231     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
232 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
233 static struct evcnt entropy_extract_fail_evcnt =
234     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
235 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
236 static struct evcnt entropy_request_evcnt =
237     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
238 EVCNT_ATTACH_STATIC(entropy_request_evcnt);
239 static struct evcnt entropy_deplete_evcnt =
240     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
241 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
242 static struct evcnt entropy_notify_evcnt =
243     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
244 EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
245 
246 /* Sysctl knobs */
247 static bool	entropy_collection = 1;
248 static bool	entropy_depletion = 0; /* Silly!  */
249 
250 static const struct sysctlnode	*entropy_sysctlroot;
251 static struct sysctllog		*entropy_sysctllog;
252 
253 /* Forward declarations */
254 static void	entropy_init_cpu(void *, void *, struct cpu_info *);
255 static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
256 static void	entropy_account_cpu(struct entropy_cpu *);
257 static void	entropy_enter(const void *, size_t, unsigned, bool);
258 static bool	entropy_enter_intr(const void *, size_t, unsigned, bool);
259 static void	entropy_softintr(void *);
260 static void	entropy_thread(void *);
261 static bool	entropy_pending(void);
262 static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
263 static void	entropy_do_consolidate(void);
264 static void	entropy_consolidate_xc(void *, void *);
265 static void	entropy_notify(void);
266 static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
267 static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
268 static void	filt_entropy_read_detach(struct knote *);
269 static int	filt_entropy_read_event(struct knote *, long);
270 static int	entropy_request(size_t, int);
271 static void	rnd_add_data_internal(struct krndsource *, const void *,
272 		    uint32_t, uint32_t, bool);
273 static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
274 		    uint32_t, bool, uint32_t, bool);
275 static unsigned	rndsource_entropybits(struct krndsource *);
276 static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
277 static void	rndsource_to_user(struct krndsource *, rndsource_t *);
278 static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
279 static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);
280 
281 /*
282  * entropy_timer()
283  *
284  *	Cycle counter, time counter, or anything that changes a wee bit
285  *	unpredictably.
286  */
287 static inline uint32_t
288 entropy_timer(void)
289 {
290 	struct bintime bt;
291 	uint32_t v;
292 
293 	/* If we have a CPU cycle counter, use the low 32 bits.  */
294 #ifdef __HAVE_CPU_COUNTER
295 	if (__predict_true(cpu_hascounter()))
296 		return cpu_counter32();
297 #endif	/* __HAVE_CPU_COUNTER */
298 
299 	/* If we're cold, tough.  Can't binuptime while cold.  */
300 	if (__predict_false(cold))
301 		return 0;
302 
303 	/* Fold the 128 bits of binuptime into 32 bits.  */
304 	binuptime(&bt);
305 	v = bt.frac;
306 	v ^= bt.frac >> 32;
307 	v ^= bt.sec;
308 	v ^= bt.sec >> 32;
309 	return v;
310 }
311 
312 static void
313 attach_seed_rndsource(void)
314 {
315 
316 	KASSERT(!cpu_intr_p());
317 	KASSERT(!cpu_softintr_p());
318 	KASSERT(cold);
319 
320 	/*
321 	 * First called no later than entropy_init, while we are still
322 	 * single-threaded, so no need for RUN_ONCE.
323 	 */
324 	if (E->seed_rndsource)
325 		return;
326 
327 	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
328 	    RND_FLAG_COLLECT_VALUE);
329 	E->seed_rndsource = true;
330 }
331 
332 /*
333  * entropy_init()
334  *
335  *	Initialize the entropy subsystem.  Panic on failure.
336  *
337  *	Requires percpu(9) and sysctl(9) to be initialized.  Must run
338  *	while cold.
339  */
340 static void
341 entropy_init(void)
342 {
343 	uint32_t extra[2];
344 	struct krndsource *rs;
345 	unsigned i = 0;
346 
347 	KASSERT(cold);
348 
349 	/* Grab some cycle counts early at boot.  */
350 	extra[i++] = entropy_timer();
351 
352 	/* Run the entropy pool cryptography self-test.  */
353 	if (entpool_selftest() == -1)
354 		panic("entropy pool crypto self-test failed");
355 
356 	/* Create the sysctl directory.  */
357 	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
358 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
359 	    SYSCTL_DESCR("Entropy (random number sources) options"),
360 	    NULL, 0, NULL, 0,
361 	    CTL_KERN, CTL_CREATE, CTL_EOL);
362 
363 	/* Create the sysctl knobs.  */
364 	/* XXX These shouldn't be writable at securelevel>0.  */
365 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
366 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
367 	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
368 	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
369 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
370 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
371 	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
372 	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
373 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
374 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
375 	    SYSCTL_DESCR("Trigger entropy consolidation now"),
376 	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
377 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
378 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
379 	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
380 	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
381 	/* XXX These should maybe not be readable at securelevel>0.  */
382 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
383 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
384 	    "needed",
385 	    SYSCTL_DESCR("Systemwide entropy deficit (bits of entropy)"),
386 	    NULL, 0, &E->bitsneeded, 0, CTL_CREATE, CTL_EOL);
387 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
388 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
389 	    "pending",
390 	    SYSCTL_DESCR("Number of bits of entropy pending on CPUs"),
391 	    NULL, 0, &E->bitspending, 0, CTL_CREATE, CTL_EOL);
392 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
393 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
394 	    "samplesneeded",
395 	    SYSCTL_DESCR("Systemwide entropy deficit (samples)"),
396 	    NULL, 0, &E->samplesneeded, 0, CTL_CREATE, CTL_EOL);
397 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
398 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
399 	    "samplespending",
400 	    SYSCTL_DESCR("Number of samples pending on CPUs"),
401 	    NULL, 0, &E->samplespending, 0, CTL_CREATE, CTL_EOL);
402 	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
403 	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT,
404 	    "epoch", SYSCTL_DESCR("Entropy epoch"),
405 	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
406 
407 	/* Initialize the global state for multithreaded operation.  */
408 	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
409 	cv_init(&E->cv, "entropy");
410 	selinit(&E->selq);
411 	cv_init(&E->sourcelock_cv, "entsrclock");
412 
413 	/* Make sure the seed source is attached.  */
414 	attach_seed_rndsource();
415 
416 	/* Note if the bootloader didn't provide a seed.  */
417 	if (!E->seeded)
418 		aprint_debug("entropy: no seed from bootloader\n");
419 
420 	/* Allocate the per-CPU records for all early entropy sources.  */
421 	LIST_FOREACH(rs, &E->sources, list)
422 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
423 
424 	/* Allocate and initialize the per-CPU state.  */
425 	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
426 	    entropy_init_cpu, entropy_fini_cpu, NULL);
427 
428 	/* Enter the boot cycle count to get started.  */
429 	extra[i++] = entropy_timer();
430 	KASSERT(i == __arraycount(extra));
431 	entropy_enter(extra, sizeof extra, /*nbits*/0, /*count*/false);
432 	explicit_memset(extra, 0, sizeof extra);
433 }
434 
435 /*
436  * entropy_init_late()
437  *
438  *	Late initialization.  Panic on failure.
439  *
440  *	Requires CPUs to have been detected and LWPs to have started.
441  *	Must run while cold.
442  */
443 static void
444 entropy_init_late(void)
445 {
446 	int error;
447 
448 	KASSERT(cold);
449 
450 	/*
451 	 * Establish the softint at the highest softint priority level.
452 	 * Must happen after CPU detection.
453 	 */
454 	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
455 	    &entropy_softintr, NULL);
456 	if (entropy_sih == NULL)
457 		panic("unable to establish entropy softint");
458 
459 	/*
460 	 * Create the entropy housekeeping thread.  Must happen after
461 	 * lwpinit.
462 	 */
463 	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
464 	    entropy_thread, NULL, &entropy_lwp, "entbutler");
465 	if (error)
466 		panic("unable to create entropy housekeeping thread: %d",
467 		    error);
468 }
469 
470 /*
471  * entropy_init_cpu(ptr, cookie, ci)
472  *
473  *	percpu(9) constructor for per-CPU entropy pool.
474  */
475 static void
476 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
477 {
478 	struct entropy_cpu *ec = ptr;
479 	const char *cpuname;
480 
481 	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
482 	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
483 	ec->ec_bitspending = 0;
484 	ec->ec_samplespending = 0;
485 	ec->ec_locked = false;
486 
487 	/* XXX ci_cpuname may not be initialized early enough.  */
488 	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
489 	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
490 	    cpuname, "entropy softint");
491 	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
492 	    cpuname, "entropy intrdrop");
493 	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
494 	    cpuname, "entropy intrtrunc");
495 }
496 
497 /*
498  * entropy_fini_cpu(ptr, cookie, ci)
499  *
500  *	percpu(9) destructor for per-CPU entropy pool.
501  */
502 static void
503 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
504 {
505 	struct entropy_cpu *ec = ptr;
506 
507 	/*
508 	 * Zero any lingering data.  Disclosure of the per-CPU pool
509 	 * shouldn't retroactively affect the security of any keys
510 	 * generated, because entpool(9) erases whatever we have just
511 	 * drawn out of any pool, but better safe than sorry.
512 	 */
513 	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
514 
515 	evcnt_detach(&ec->ec_evcnt->intrtrunc);
516 	evcnt_detach(&ec->ec_evcnt->intrdrop);
517 	evcnt_detach(&ec->ec_evcnt->softint);
518 
519 	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
520 	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
521 }
522 
523 /*
524  * ec = entropy_cpu_get(&lock)
525  * entropy_cpu_put(&lock, ec)
526  *
527  *	Lock and unlock the per-CPU entropy state.  This only prevents
528  *	access on the same CPU -- by hard interrupts, by soft
529  *	interrupts, or by other threads.
530  *
531  *	Blocks soft interrupts and preemption altogether; doesn't block
532  *	hard interrupts, but causes samples in hard interrupts to be
533  *	dropped.
534  */
535 static struct entropy_cpu *
536 entropy_cpu_get(struct entropy_cpu_lock *lock)
537 {
538 	struct entropy_cpu *ec;
539 
540 	ec = percpu_getref(entropy_percpu);
541 	lock->ecl_s = splsoftserial();
542 	KASSERT(!ec->ec_locked);
543 	ec->ec_locked = true;
544 	lock->ecl_pctr = lwp_pctr();
545 	__insn_barrier();
546 
547 	return ec;
548 }
549 
550 static void
551 entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
552 {
553 
554 	KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
555 	KASSERT(ec->ec_locked);
556 
557 	__insn_barrier();
558 	KASSERT(lock->ecl_pctr == lwp_pctr());
559 	ec->ec_locked = false;
560 	splx(lock->ecl_s);
561 	percpu_putref(entropy_percpu);
562 }
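
/*
 * Usage sketch -- the same pattern entropy_enter and entropy_softintr
 * follow below; buf, len, and nbits are placeholders here, not real
 * variables:
 *
 *	struct entropy_cpu_lock lock;
 *	struct entropy_cpu *ec;
 *	unsigned bitspending;
 *
 *	ec = entropy_cpu_get(&lock);
 *	entpool_enter(ec->ec_pool, buf, len);
 *	bitspending = ec->ec_bitspending;
 *	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
 *	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
 *	entropy_cpu_put(&lock, ec);
 */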
563 
564 /*
565  * entropy_seed(seed)
566  *
567  *	Seed the entropy pool with seed.  Meant to be called as early
568  *	as possible by the bootloader; may be called before or after
569  *	entropy_init.  Must be called before system reaches userland.
570  *	Must be called in thread or soft interrupt context, not in hard
571  *	interrupt context.  Must be called at most once.
572  *
573  *	Overwrites the seed in place.  Caller may then free the memory.
574  */
575 static void
576 entropy_seed(rndsave_t *seed)
577 {
578 	SHA1_CTX ctx;
579 	uint8_t digest[SHA1_DIGEST_LENGTH];
580 	bool seeded;
581 
582 	KASSERT(!cpu_intr_p());
583 	KASSERT(!cpu_softintr_p());
584 	KASSERT(cold);
585 
586 	/*
587 	 * Verify the checksum.  If the checksum fails, take the data
588 	 * but ignore the entropy estimate -- the file may have been
589 	 * incompletely written with garbage, which is harmless to add
590 	 * but may not be as unpredictable as alleged.
591 	 */
592 	SHA1Init(&ctx);
593 	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
594 	SHA1Update(&ctx, seed->data, sizeof(seed->data));
595 	SHA1Final(digest, &ctx);
596 	CTASSERT(sizeof(seed->digest) == sizeof(digest));
597 	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
598 		printf("entropy: invalid seed checksum\n");
599 		seed->entropy = 0;
600 	}
601 	explicit_memset(&ctx, 0, sizeof ctx);
602 	explicit_memset(digest, 0, sizeof digest);
603 
604 	/*
605 	 * If the entropy is insensibly large, try byte-swapping.
606 	 * Otherwise assume the file is corrupted and act as though it
607 	 * has zero entropy.
608 	 */
609 	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
610 		seed->entropy = bswap32(seed->entropy);
611 		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
612 			seed->entropy = 0;
613 	}
614 
615 	/* Make sure the seed source is attached.  */
616 	attach_seed_rndsource();
617 
618 	/* Test and set E->seeded.  */
619 	seeded = E->seeded;
620 	E->seeded = (seed->entropy > 0);
621 
622 	/*
623 	 * If we've been seeded, may be re-entering the same seed
624 	 * (e.g., bootloader vs module init, or something).  No harm in
625 	 * entering it twice, but it contributes no additional entropy.
626 	 */
627 	if (seeded) {
628 		printf("entropy: double-seeded by bootloader\n");
629 		seed->entropy = 0;
630 	} else {
631 		printf("entropy: entering seed from bootloader"
632 		    " with %u bits of entropy\n", (unsigned)seed->entropy);
633 	}
634 
635 	/* Enter it into the pool and promptly zero it.  */
636 	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
637 	    seed->entropy);
638 	explicit_memset(seed, 0, sizeof(*seed));
639 }
640 
641 /*
642  * entropy_bootrequest()
643  *
644  *	Request entropy from all sources at boot, once config is
645  *	complete and interrupts are running but we are still cold.
646  */
647 void
648 entropy_bootrequest(void)
649 {
650 	int error;
651 
652 	KASSERT(!cpu_intr_p());
653 	KASSERT(!cpu_softintr_p());
654 	KASSERT(cold);
655 
656 	/*
657 	 * Request enough to satisfy the maximum entropy shortage.
658 	 * This is harmless overkill if the bootloader provided a seed.
659 	 */
660 	error = entropy_request(MINENTROPYBYTES, ENTROPY_WAIT);
661 	KASSERTMSG(error == 0, "error=%d", error);
662 }
663 
664 /*
665  * entropy_epoch()
666  *
667  *	Returns the current entropy epoch.  If this changes, you should
668  *	reseed.  If -1, means system entropy has not yet reached full
669  *	entropy or been explicitly consolidated; never reverts back to
670  *	-1.  Never zero, so you can always use zero as an uninitialized
671  *	sentinel value meaning `reseed ASAP'.
672  *
673  *	Usage model:
674  *
675  *		struct foo {
676  *			struct crypto_prng prng;
677  *			unsigned epoch;
678  *		} *foo;
679  *
680  *		unsigned epoch = entropy_epoch();
681  *		if (__predict_false(epoch != foo->epoch)) {
682  *			uint8_t seed[32];
683  *			if (entropy_extract(seed, sizeof seed, 0) != 0)
684  *				warn("no entropy");
685  *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
686  *			foo->epoch = epoch;
687  *		}
688  */
689 unsigned
690 entropy_epoch(void)
691 {
692 
693 	/*
694 	 * Unsigned int, so no need for seqlock for an atomic read, but
695 	 * make sure we read it afresh each time.
696 	 */
697 	return atomic_load_relaxed(&E->epoch);
698 }
699 
700 /*
701  * entropy_ready()
702  *
703  *	True if the entropy pool has full entropy.
704  */
705 bool
706 entropy_ready(void)
707 {
708 
709 	return atomic_load_relaxed(&E->bitsneeded) == 0;
710 }
711 
712 /*
713  * entropy_account_cpu(ec)
714  *
715  *	Consider whether to consolidate entropy into the global pool
716  *	after we just added some into the current CPU's pending pool.
717  *
718  *	- If this CPU can provide enough entropy now, do so.
719  *
720  *	- If this and whatever else is available on other CPUs can
721  *	  provide enough entropy, kick the consolidation thread.
722  *
723  *	- Otherwise, do as little as possible, except maybe consolidate
724  *	  entropy at most once a minute.
725  *
726  *	Caller must be bound to a CPU and therefore have exclusive
727  *	access to ec.  Will acquire and release the global lock.
728  */
729 static void
730 entropy_account_cpu(struct entropy_cpu *ec)
731 {
732 	struct entropy_cpu_lock lock;
733 	struct entropy_cpu *ec0;
734 	unsigned bitsdiff, samplesdiff;
735 
736 	KASSERT(!cpu_intr_p());
737 	KASSERT(!cold);
738 	KASSERT(curlwp->l_pflag & LP_BOUND);
739 
740 	/*
741 	 * If there's no entropy needed, and entropy has been
742 	 * consolidated in the last minute, do nothing.
743 	 */
744 	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&
745 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
746 	    __predict_true((time_uptime - E->timestamp) <= 60))
747 		return;
748 
749 	/*
750 	 * Consider consolidation, under the global lock and with the
751 	 * per-CPU state locked.
752 	 */
753 	mutex_enter(&E->lock);
754 	ec0 = entropy_cpu_get(&lock);
755 	KASSERT(ec0 == ec);
756 
757 	if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
758 		/* Raced with consolidation xcall.  Nothing to do.  */
759 	} else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
760 		/*
761 		 * If we have not yet attained full entropy but we can
762 		 * now, do so.  This way we disseminate entropy
763 		 * promptly when it becomes available early at boot;
764 		 * otherwise we leave it to the entropy consolidation
765 		 * thread, which is rate-limited to mitigate side
766 		 * channels and abuse.
767 		 */
768 		uint8_t buf[ENTPOOL_CAPACITY];
769 
770 		/* Transfer from the local pool to the global pool.  */
771 		entpool_extract(ec->ec_pool, buf, sizeof buf);
772 		entpool_enter(&E->pool, buf, sizeof buf);
773 		atomic_store_relaxed(&ec->ec_bitspending, 0);
774 		atomic_store_relaxed(&ec->ec_samplespending, 0);
775 		atomic_store_relaxed(&E->bitsneeded, 0);
776 		atomic_store_relaxed(&E->samplesneeded, 0);
777 
778 		/* Notify waiters that we now have full entropy.  */
779 		entropy_notify();
780 		entropy_immediate_evcnt.ev_count++;
781 	} else {
782 		/* Determine how much we can add to the global pool.  */
783 		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
784 		    "E->bitspending=%u", E->bitspending);
785 		bitsdiff = MIN(ec->ec_bitspending,
786 		    MINENTROPYBITS - E->bitspending);
787 		KASSERTMSG(E->samplespending <= MINSAMPLES,
788 		    "E->samplespending=%u", E->samplespending);
789 		samplesdiff = MIN(ec->ec_samplespending,
790 		    MINSAMPLES - E->samplespending);
791 
792 		/*
793 		 * This should make a difference unless we are already
794 		 * saturated.
795 		 */
796 		KASSERTMSG((bitsdiff || samplesdiff ||
797 			E->bitspending == MINENTROPYBITS ||
798 			E->samplespending == MINSAMPLES),
799 		    "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
800 		    " samplesdiff=%u E->samplespending=%u"
801 		    " ec->ec_samplespending=%u"
802 		    " minentropybits=%u minsamples=%u",
803 		    bitsdiff, E->bitspending, ec->ec_bitspending,
804 		    samplesdiff, E->samplespending, ec->ec_samplespending,
805 		    (unsigned)MINENTROPYBITS, (unsigned)MINSAMPLES);
806 
807 		/* Add to the global, subtract from the local.  */
808 		E->bitspending += bitsdiff;
809 		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
810 		    "E->bitspending=%u", E->bitspending);
811 		atomic_store_relaxed(&ec->ec_bitspending,
812 		    ec->ec_bitspending - bitsdiff);
813 
814 		E->samplespending += samplesdiff;
815 		KASSERTMSG(E->samplespending <= MINSAMPLES,
816 		    "E->samplespending=%u", E->samplespending);
817 		atomic_store_relaxed(&ec->ec_samplespending,
818 		    ec->ec_samplespending - samplesdiff);
819 
820 		/* One or the other must have gone up from zero.  */
821 		KASSERT(E->bitspending || E->samplespending);
822 
823 		if (E->bitsneeded <= E->bitspending ||
824 		    E->samplesneeded <= E->samplespending) {
825 			/*
826 			 * Enough bits or at least samples between all
827 			 * the per-CPU pools.  Leave a note for the
828 			 * housekeeping thread to consolidate entropy
829 			 * next time it wakes up -- and wake it up if
830 			 * this is the first time, to speed things up.
831 			 *
832 			 * If we don't need any entropy, this doesn't
833 			 * mean much, but it is the only time we ever
834 			 * gather additional entropy in case the
835 			 * accounting has been overly optimistic.  This
836 			 * happens at most once a minute, so there's
837 			 * negligible performance cost.
838 			 */
839 			E->consolidate = true;
840 			if (E->epoch == (unsigned)-1)
841 				cv_broadcast(&E->cv);
842 			if (E->bitsneeded == 0)
843 				entropy_discretionary_evcnt.ev_count++;
844 		} else {
845 			/* Can't get full entropy.  Keep gathering.  */
846 			entropy_partial_evcnt.ev_count++;
847 		}
848 	}
849 
850 	entropy_cpu_put(&lock, ec);
851 	mutex_exit(&E->lock);
852 }
853 
854 /*
855  * entropy_enter_early(buf, len, nbits)
856  *
857  *	Do entropy bookkeeping globally, before we have established
858  *	per-CPU pools.  Enter directly into the global pool in the hope
859  *	that we enter enough before the first entropy_extract to thwart
860  *	iterative-guessing attacks; entropy_extract will warn if not.
861  */
862 static void
863 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
864 {
865 	bool notify = false;
866 	int s;
867 
868 	KASSERT(cold);
869 
870 	/*
871 	 * We're early at boot before multithreading and multi-CPU
872 	 * operation, and we don't have softints yet to defer
873 	 * processing from interrupt context, so we have to enter the
874 	 * samples directly into the global pool.  But interrupts may
875 	 * be enabled, and we enter this path from interrupt context,
876 	 * so block interrupts until we're done.
877 	 */
878 	s = splhigh();
879 
880 	/* Enter it into the pool.  */
881 	entpool_enter(&E->pool, buf, len);
882 
883 	/*
884 	 * Decide whether to notify reseed -- we will do so if either:
885 	 * (a) we transition from partial entropy to full entropy, or
886 	 * (b) we get a batch of full entropy all at once.
887 	 * We don't count timing samples because we assume, while cold,
888 	 * there's not likely to be much jitter yet.
889 	 */
890 	notify |= (E->bitsneeded && E->bitsneeded <= nbits);
891 	notify |= (nbits >= MINENTROPYBITS);
892 
893 	/*
894 	 * Subtract from the needed count and notify if appropriate.
895 	 * We don't count samples here because entropy_timer might
896 	 * still be returning zero at this point if there's no CPU
897 	 * cycle counter.
898 	 */
899 	E->bitsneeded -= MIN(E->bitsneeded, nbits);
900 	if (notify) {
901 		entropy_notify();
902 		entropy_immediate_evcnt.ev_count++;
903 	}
904 
905 	splx(s);
906 }
907 
908 /*
909  * entropy_enter(buf, len, nbits, count)
910  *
911  *	Enter len bytes of data from buf into the system's entropy
912  *	pool, stirring as necessary when the internal buffer fills up.
913  *	nbits is a lower bound on the number of bits of entropy in the
914  *	process that led to this sample.
915  */
916 static void
917 entropy_enter(const void *buf, size_t len, unsigned nbits, bool count)
918 {
919 	struct entropy_cpu_lock lock;
920 	struct entropy_cpu *ec;
921 	unsigned bitspending, samplespending;
922 	int bound;
923 
924 	KASSERTMSG(!cpu_intr_p(),
925 	    "use entropy_enter_intr from interrupt context");
926 	KASSERTMSG(howmany(nbits, NBBY) <= len,
927 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
928 
929 	/*
930 	 * If we're still cold, just use entropy_enter_early to put
931 	 * samples directly into the global pool.
932 	 */
933 	if (__predict_false(cold)) {
934 		entropy_enter_early(buf, len, nbits);
935 		return;
936 	}
937 
938 	/*
939 	 * Bind ourselves to the current CPU so we don't switch CPUs
940 	 * between entering data into the current CPU's pool (and
941 	 * updating the pending count) and transferring it to the
942 	 * global pool in entropy_account_cpu.
943 	 */
944 	bound = curlwp_bind();
945 
946 	/*
947 	 * With the per-CPU state locked, enter into the per-CPU pool
948 	 * and count up what we can add.
949 	 *
950 	 * We don't count samples while cold because entropy_timer
951 	 * might still be returning zero if there's no CPU cycle
952 	 * counter.
953 	 */
954 	ec = entropy_cpu_get(&lock);
955 	entpool_enter(ec->ec_pool, buf, len);
956 	bitspending = ec->ec_bitspending;
957 	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
958 	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
959 	samplespending = ec->ec_samplespending;
960 	if (__predict_true(count)) {
961 		samplespending += MIN(MINSAMPLES - samplespending, 1);
962 		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
963 	}
964 	entropy_cpu_put(&lock, ec);
965 
966 	/* Consolidate globally if appropriate based on what we added.  */
967 	if (bitspending > 0 || samplespending >= MINSAMPLES)
968 		entropy_account_cpu(ec);
969 
970 	curlwp_bindx(bound);
971 }
972 
973 /*
974  * entropy_enter_intr(buf, len, nbits, count)
975  *
976  *	Enter up to len bytes of data from buf into the system's
977  *	entropy pool without stirring.  nbits is a lower bound on the
978  *	number of bits of entropy in the process that led to this
979  *	sample.  If the sample could be entered completely, assume
980  *	nbits of entropy pending; otherwise assume none, since we don't
981  *	know whether some parts of the sample are constant, for
982  *	instance.  Schedule a softint to stir the entropy pool if
983  *	needed.  Return true if used fully, false if truncated at all.
984  *
985  *	Using this in thread or softint context with no spin locks held
986  *	will work, but you might as well use entropy_enter in that
987  *	case.
988  */
989 static bool
990 entropy_enter_intr(const void *buf, size_t len, unsigned nbits, bool count)
991 {
992 	struct entropy_cpu *ec;
993 	bool fullyused = false;
994 	uint32_t bitspending, samplespending;
995 	int s;
996 
997 	KASSERTMSG(howmany(nbits, NBBY) <= len,
998 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
999 
1000 	/*
1001 	 * If we're still cold, just use entropy_enter_early to put
1002 	 * samples directly into the global pool.
1003 	 */
1004 	if (__predict_false(cold)) {
1005 		entropy_enter_early(buf, len, nbits);
1006 		return true;
1007 	}
1008 
1009 	/*
1010 	 * In case we were called in thread or interrupt context with
1011 	 * interrupts unblocked, block soft interrupts up to
1012 	 * IPL_SOFTSERIAL.  This way logic that is safe in interrupt
1013 	 * context or under a spin lock is also safe in less
1014 	 * restrictive contexts.
1015 	 */
1016 	s = splsoftserial();
1017 
1018 	/*
1019 	 * Acquire the per-CPU state.  If someone is in the middle of
1020 	 * using it, drop the sample.  Otherwise, take the lock so that
1021 	 * higher-priority interrupts will drop their samples.
1022 	 */
1023 	ec = percpu_getref(entropy_percpu);
1024 	if (ec->ec_locked) {
1025 		ec->ec_evcnt->intrdrop.ev_count++;
1026 		goto out0;
1027 	}
1028 	ec->ec_locked = true;
1029 	__insn_barrier();
1030 
1031 	/*
1032 	 * Enter as much as we can into the per-CPU pool.  If it was
1033 	 * truncated, schedule a softint to stir the pool and stop.
1034 	 */
1035 	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
1036 		if (__predict_true(!cold))
1037 			softint_schedule(entropy_sih);
1038 		ec->ec_evcnt->intrtrunc.ev_count++;
1039 		goto out1;
1040 	}
1041 	fullyused = true;
1042 
1043 	/*
1044 	 * Count up what we can contribute.
1045 	 *
1046 	 * We don't count samples while cold because entropy_timer
1047 	 * might still be returning zero if there's no CPU cycle
1048 	 * counter.
1049 	 */
1050 	bitspending = ec->ec_bitspending;
1051 	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
1052 	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
1053 	if (__predict_true(count)) {
1054 		samplespending = ec->ec_samplespending;
1055 		samplespending += MIN(MINSAMPLES - samplespending, 1);
1056 		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
1057 	}
1058 
1059 	/* Schedule a softint if we added anything and it matters.  */
1060 	if (__predict_false(atomic_load_relaxed(&E->bitsneeded) ||
1061 		atomic_load_relaxed(&entropy_depletion)) &&
1062 	    (nbits != 0 || count) &&
1063 	    __predict_true(!cold))
1064 		softint_schedule(entropy_sih);
1065 
1066 out1:	/* Release the per-CPU state.  */
1067 	KASSERT(ec->ec_locked);
1068 	__insn_barrier();
1069 	ec->ec_locked = false;
1070 out0:	percpu_putref(entropy_percpu);
1071 	splx(s);
1072 
1073 	return fullyused;
1074 }
1075 
1076 /*
1077  * entropy_softintr(cookie)
1078  *
1079  *	Soft interrupt handler for entering entropy.  Takes care of
1080  *	stirring the local CPU's entropy pool if it filled up during
1081  *	hard interrupts, and promptly crediting entropy from the local
1082  *	CPU's entropy pool to the global entropy pool if needed.
1083  */
1084 static void
1085 entropy_softintr(void *cookie)
1086 {
1087 	struct entropy_cpu_lock lock;
1088 	struct entropy_cpu *ec;
1089 	unsigned bitspending, samplespending;
1090 
1091 	/*
1092 	 * With the per-CPU state locked, stir the pool if necessary
1093 	 * and determine if there's any pending entropy on this CPU to
1094 	 * account globally.
1095 	 */
1096 	ec = entropy_cpu_get(&lock);
1097 	ec->ec_evcnt->softint.ev_count++;
1098 	entpool_stir(ec->ec_pool);
1099 	bitspending = ec->ec_bitspending;
1100 	samplespending = ec->ec_samplespending;
1101 	entropy_cpu_put(&lock, ec);
1102 
1103 	/* Consolidate globally if appropriate based on what we added.  */
1104 	if (bitspending > 0 || samplespending >= MINSAMPLES)
1105 		entropy_account_cpu(ec);
1106 }
1107 
1108 /*
1109  * entropy_thread(cookie)
1110  *
1111  *	Handle any asynchronous entropy housekeeping.
1112  */
1113 static void
1114 entropy_thread(void *cookie)
1115 {
1116 	bool consolidate;
1117 
1118 #ifndef _RUMPKERNEL		/* XXX rump starts threads before cold */
1119 	KASSERT(!cold);
1120 #endif
1121 
1122 	for (;;) {
1123 		/*
1124 		 * Wait until there's full entropy somewhere among the
1125 		 * CPUs, as confirmed at most once per minute, or
1126 		 * someone wants to consolidate.
1127 		 */
1128 		if (entropy_pending()) {
1129 			consolidate = true;
1130 		} else {
1131 			mutex_enter(&E->lock);
1132 			if (!E->consolidate)
1133 				cv_timedwait(&E->cv, &E->lock, 60*hz);
1134 			consolidate = E->consolidate;
1135 			E->consolidate = false;
1136 			mutex_exit(&E->lock);
1137 		}
1138 
1139 		if (consolidate) {
1140 			/* Do it.  */
1141 			entropy_do_consolidate();
1142 
1143 			/* Mitigate abuse.  */
1144 			kpause("entropy", false, hz, NULL);
1145 		}
1146 	}
1147 }
1148 
1149 struct entropy_pending_count {
1150 	uint32_t bitspending;
1151 	uint32_t samplespending;
1152 };
1153 
1154 /*
1155  * entropy_pending()
1156  *
1157  *	True if enough bits or samples are pending on other CPUs to
1158  *	warrant consolidation.
1159  */
1160 static bool
1161 entropy_pending(void)
1162 {
1163 	struct entropy_pending_count count = { 0, 0 }, *C = &count;
1164 
1165 	percpu_foreach(entropy_percpu, &entropy_pending_cpu, C);
1166 	return C->bitspending >= MINENTROPYBITS ||
1167 	    C->samplespending >= MINSAMPLES;
1168 }
1169 
1170 static void
1171 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
1172 {
1173 	struct entropy_cpu *ec = ptr;
1174 	struct entropy_pending_count *C = cookie;
1175 	uint32_t cpu_bitspending;
1176 	uint32_t cpu_samplespending;
1177 
1178 	cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);
1179 	cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);
1180 	C->bitspending += MIN(MINENTROPYBITS - C->bitspending,
1181 	    cpu_bitspending);
1182 	C->samplespending += MIN(MINSAMPLES - C->samplespending,
1183 	    cpu_samplespending);
1184 }
1185 
1186 /*
1187  * entropy_do_consolidate()
1188  *
1189  *	Issue a cross-call to gather entropy on all CPUs and advance
1190  *	the entropy epoch.
1191  */
1192 static void
1193 entropy_do_consolidate(void)
1194 {
1195 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1196 	static struct timeval lasttime; /* serialized by E->lock */
1197 	struct entpool pool;
1198 	uint8_t buf[ENTPOOL_CAPACITY];
1199 	unsigned bitsdiff, samplesdiff;
1200 	uint64_t ticket;
1201 
1202 	KASSERT(!cold);
1203 	ASSERT_SLEEPABLE();
1204 
1205 	/* Gather entropy on all CPUs into a temporary pool.  */
1206 	memset(&pool, 0, sizeof pool);
1207 	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
1208 	xc_wait(ticket);
1209 
1210 	/* Acquire the lock to notify waiters.  */
1211 	mutex_enter(&E->lock);
1212 
1213 	/* Count another consolidation.  */
1214 	entropy_consolidate_evcnt.ev_count++;
1215 
1216 	/* Note when we last consolidated, i.e. now.  */
1217 	E->timestamp = time_uptime;
1218 
1219 	/* Mix what we gathered into the global pool.  */
1220 	entpool_extract(&pool, buf, sizeof buf);
1221 	entpool_enter(&E->pool, buf, sizeof buf);
1222 	explicit_memset(&pool, 0, sizeof pool);
1223 
1224 	/* Count the entropy that was gathered.  */
1225 	bitsdiff = MIN(E->bitsneeded, E->bitspending);
1226 	atomic_store_relaxed(&E->bitsneeded, E->bitsneeded - bitsdiff);
1227 	E->bitspending -= bitsdiff;
1228 	if (__predict_false(E->bitsneeded > 0) && bitsdiff != 0) {
1229 		if ((boothowto & AB_DEBUG) != 0 &&
1230 		    ratecheck(&lasttime, &interval)) {
1231 			printf("WARNING:"
1232 			    " consolidating less than full entropy\n");
1233 		}
1234 	}
1235 
1236 	samplesdiff = MIN(E->samplesneeded, E->samplespending);
1237 	atomic_store_relaxed(&E->samplesneeded,
1238 	    E->samplesneeded - samplesdiff);
1239 	E->samplespending -= samplesdiff;
1240 
1241 	/* Advance the epoch and notify waiters.  */
1242 	entropy_notify();
1243 
1244 	/* Release the lock.  */
1245 	mutex_exit(&E->lock);
1246 }
1247 
1248 /*
1249  * entropy_consolidate_xc(vpool, arg2)
1250  *
1251  *	Extract output from the local CPU's input pool and enter it
1252  *	into a temporary pool passed as vpool.
1253  */
1254 static void
1255 entropy_consolidate_xc(void *vpool, void *arg2 __unused)
1256 {
1257 	struct entpool *pool = vpool;
1258 	struct entropy_cpu_lock lock;
1259 	struct entropy_cpu *ec;
1260 	uint8_t buf[ENTPOOL_CAPACITY];
1261 	uint32_t extra[7];
1262 	unsigned i = 0;
1263 
1264 	/* Grab CPU number and cycle counter to mix extra into the pool.  */
1265 	extra[i++] = cpu_number();
1266 	extra[i++] = entropy_timer();
1267 
1268 	/*
1269 	 * With the per-CPU state locked, extract from the per-CPU pool
1270 	 * and count it as no longer pending.
1271 	 */
1272 	ec = entropy_cpu_get(&lock);
1273 	extra[i++] = entropy_timer();
1274 	entpool_extract(ec->ec_pool, buf, sizeof buf);
1275 	atomic_store_relaxed(&ec->ec_bitspending, 0);
1276 	atomic_store_relaxed(&ec->ec_samplespending, 0);
1277 	extra[i++] = entropy_timer();
1278 	entropy_cpu_put(&lock, ec);
1279 	extra[i++] = entropy_timer();
1280 
1281 	/*
1282 	 * Copy over statistics, and enter the per-CPU extract and the
1283 	 * extra timing into the temporary pool, under the global lock.
1284 	 */
1285 	mutex_enter(&E->lock);
1286 	extra[i++] = entropy_timer();
1287 	entpool_enter(pool, buf, sizeof buf);
1288 	explicit_memset(buf, 0, sizeof buf);
1289 	extra[i++] = entropy_timer();
1290 	KASSERT(i == __arraycount(extra));
1291 	entpool_enter(pool, extra, sizeof extra);
1292 	explicit_memset(extra, 0, sizeof extra);
1293 	mutex_exit(&E->lock);
1294 }
1295 
1296 /*
1297  * entropy_notify()
1298  *
1299  *	Caller just contributed entropy to the global pool.  Advance
1300  *	the entropy epoch and notify waiters.
1301  *
1302  *	Caller must hold the global entropy lock.
1303  */
1304 static void
1305 entropy_notify(void)
1306 {
1307 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1308 	static struct timeval lasttime; /* serialized by E->lock */
1309 	static bool ready = false, besteffort = false;
1310 	unsigned epoch;
1311 
1312 	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1313 
1314 	/*
1315 	 * If this is the first time, print a message to the console
1316 	 * that we're ready so operators can compare it to the timing
1317 	 * of other events.
1318 	 *
1319 	 * If we didn't get full entropy from reliable sources, report
1320 	 * instead that we are running on fumes with best effort.  (If
1321 	 * we ever do get full entropy after that, print the ready
1322 	 * message once.)
1323 	 */
1324 	if (__predict_false(!ready)) {
1325 		if (E->bitsneeded == 0) {
1326 			printf("entropy: ready\n");
1327 			ready = true;
1328 		} else if (E->samplesneeded == 0 && !besteffort) {
1329 			printf("entropy: best effort\n");
1330 			besteffort = true;
1331 		}
1332 	}
1333 
1334 	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
1335 	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
1336 	    ratecheck(&lasttime, &interval)) {
1337 		epoch = E->epoch + 1;
1338 		if (epoch == 0 || epoch == (unsigned)-1)
1339 			epoch = 1;
1340 		atomic_store_relaxed(&E->epoch, epoch);
1341 	}
1342 	KASSERT(E->epoch != (unsigned)-1);
1343 
1344 	/* Notify waiters.  */
1345 	if (__predict_true(!cold)) {
1346 		cv_broadcast(&E->cv);
1347 		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
1348 	}
1349 
1350 	/* Count another notification.  */
1351 	entropy_notify_evcnt.ev_count++;
1352 }
1353 
1354 /*
1355  * entropy_consolidate()
1356  *
1357  *	Trigger entropy consolidation and wait for it to complete, or
1358  *	return EINTR if interrupted by a signal.
1359  *
1360  *	This should be used sparingly, not periodically -- requiring
1361  *	conscious intervention by the operator or a clear policy
1362  *	decision.  Otherwise, the kernel will automatically consolidate
1363  *	when enough entropy has been gathered into per-CPU pools to
1364  *	transition to full entropy.
1365  */
1366 int
1367 entropy_consolidate(void)
1368 {
1369 	uint64_t ticket;
1370 	int error;
1371 
1372 	KASSERT(!cold);
1373 	ASSERT_SLEEPABLE();
1374 
1375 	mutex_enter(&E->lock);
1376 	ticket = entropy_consolidate_evcnt.ev_count;
1377 	E->consolidate = true;
1378 	cv_broadcast(&E->cv);
1379 	while (ticket == entropy_consolidate_evcnt.ev_count) {
1380 		error = cv_wait_sig(&E->cv, &E->lock);
1381 		if (error)
1382 			break;
1383 	}
1384 	mutex_exit(&E->lock);
1385 
1386 	return error;
1387 }
1388 
1389 /*
1390  * sysctl -w kern.entropy.consolidate=1
1391  *
1392  *	Trigger entropy consolidation and wait for it to complete.
1393  *	Writable only by superuser.  This, writing to /dev/random, and
1394  *	ioctl(RNDADDDATA) are the only ways for the system to
1395  *	consolidate entropy if the operator knows something the kernel
1396  *	doesn't about how unpredictable the pending entropy pools are.
1397  */
1398 static int
1399 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
1400 {
1401 	struct sysctlnode node = *rnode;
1402 	int arg = 0;
1403 	int error;
1404 
1405 	node.sysctl_data = &arg;
1406 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1407 	if (error || newp == NULL)
1408 		return error;
1409 	if (arg)
1410 		error = entropy_consolidate();
1411 
1412 	return error;
1413 }
1414 
1415 /*
1416  * entropy_gather()
1417  *
1418  *	Trigger gathering entropy from all on-demand sources, and, if
1419  *	requested, wait for synchronous sources (but not asynchronous
1420  *	sources) to complete, or fail with EINTR if interrupted by a
1421  *	signal.
1422  */
1423 int
1424 entropy_gather(void)
1425 {
1426 	int error;
1427 
1428 	mutex_enter(&E->lock);
1429 	error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT|ENTROPY_SIG);
1430 	mutex_exit(&E->lock);
1431 
1432 	return error;
1433 }
1434 
1435 /*
1436  * sysctl -w kern.entropy.gather=1
1437  *
1438  *	Trigger gathering entropy from all on-demand sources, and wait
1439  *	for synchronous sources (but not asynchronous sources) to
1440  *	complete.  Writable only by superuser.
1441  */
1442 static int
1443 sysctl_entropy_gather(SYSCTLFN_ARGS)
1444 {
1445 	struct sysctlnode node = *rnode;
1446 	int arg = 0;
1447 	int error;
1448 
1449 	node.sysctl_data = &arg;
1450 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1451 	if (error || newp == NULL)
1452 		return error;
1453 	if (arg)
1454 		error = entropy_gather();
1455 
1456 	return error;
1457 }
1458 
1459 /*
1460  * entropy_extract(buf, len, flags)
1461  *
1462  *	Extract len bytes from the global entropy pool into buf.
1463  *
1464  *	Caller MUST NOT expose these bytes directly -- must use them
1465  *	ONLY to seed a cryptographic pseudorandom number generator
1466  *	(`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'),
1467  *	and then erase them.  entropy_extract does not, on its own,
1468  *	provide backtracking resistance -- it must be combined with a
1469  *	PRNG/DRBG that does.
1470  *
1471  *	This may be used very early at boot, before even entropy_init
1472  *	has been called.
1473  *
1474  *	You generally shouldn't use this directly -- use cprng(9)
1475  *	instead.
1476  *
1477  *	Flags may have:
1478  *
1479  *		ENTROPY_WAIT	Wait for entropy if not available yet.
1480  *		ENTROPY_SIG	Allow interruption by a signal during wait.
1481  *		ENTROPY_HARDFAIL Either fill the buffer with full entropy,
1482  *				or fail without filling it at all.
1483  *
1484  *	Return zero on success, or error on failure:
1485  *
1486  *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
1487  *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
1488  *
1489  *	If ENTROPY_WAIT is set, allowed only in thread context.  If
1490  *	ENTROPY_WAIT is not set, allowed also in softint context -- may
1491  *	sleep on an adaptive lock up to IPL_SOFTSERIAL.  Forbidden in
1492  *	hard interrupt context.
1493  */
1494 int
1495 entropy_extract(void *buf, size_t len, int flags)
1496 {
1497 	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1498 	static struct timeval lasttime; /* serialized by E->lock */
1499 	bool printed = false;
1500 	int s = -1/*XXXGCC*/, error;
1501 
1502 	if (ISSET(flags, ENTROPY_WAIT)) {
1503 		ASSERT_SLEEPABLE();
1504 		KASSERT(!cold);
1505 	}
1506 
1507 	/* Refuse to operate in interrupt context.  */
1508 	KASSERT(!cpu_intr_p());
1509 
1510 	/*
1511 	 * If we're cold, we are only contending with interrupts on the
1512 	 * current CPU, so block them.  Otherwise, we are _not_
1513 	 * contending with interrupts on the current CPU, but we are
1514 	 * contending with other threads, to exclude them with a mutex.
1515 	 */
1516 	if (__predict_false(cold))
1517 		s = splhigh();
1518 	else
1519 		mutex_enter(&E->lock);
1520 
1521 	/* Wait until there is enough entropy in the system.  */
1522 	error = 0;
1523 	if (E->bitsneeded > 0 && E->samplesneeded == 0) {
1524 		/*
1525 		 * We don't have full entropy from reliable sources,
1526 		 * but we gathered a plausible number of samples from
1527 		 * other sources such as timers.  Try asking for more
1528 		 * from any sources we can, but don't worry if it
1529 		 * fails -- best effort.
1530 		 */
1531 		(void)entropy_request(ENTROPY_CAPACITY, flags);
1532 	} else while (E->bitsneeded > 0 && E->samplesneeded > 0) {
1533 		/* Ask for more, synchronously if possible.  */
1534 		error = entropy_request(len, flags);
1535 		if (error)
1536 			break;
1537 
1538 		/* If we got enough, we're done.  */
1539 		if (E->bitsneeded == 0 || E->samplesneeded == 0) {
1540 			KASSERT(error == 0);
1541 			break;
1542 		}
1543 
1544 		/* If not waiting, stop here.  */
1545 		if (!ISSET(flags, ENTROPY_WAIT)) {
1546 			error = EWOULDBLOCK;
1547 			break;
1548 		}
1549 
1550 		/* Wait for some entropy to come in and try again.  */
1551 		KASSERT(!cold);
1552 		if (!printed) {
1553 			printf("entropy: pid %d (%s) waiting for entropy(7)\n",
1554 			    curproc->p_pid, curproc->p_comm);
1555 			printed = true;
1556 		}
1557 
1558 		if (ISSET(flags, ENTROPY_SIG)) {
1559 			error = cv_timedwait_sig(&E->cv, &E->lock, hz);
1560 			if (error && error != EWOULDBLOCK)
1561 				break;
1562 		} else {
1563 			cv_timedwait(&E->cv, &E->lock, hz);
1564 		}
1565 	}
1566 
1567 	/*
1568 	 * Count failure -- but fill the buffer nevertheless, unless
1569 	 * the caller specified ENTROPY_HARDFAIL.
1570 	 */
1571 	if (error) {
1572 		if (ISSET(flags, ENTROPY_HARDFAIL))
1573 			goto out;
1574 		entropy_extract_fail_evcnt.ev_count++;
1575 	}
1576 
1577 	/*
1578 	 * Report a warning if we haven't yet reached full entropy.
1579 	 * This is the only case where we consider entropy to be
1580 	 * `depleted' without kern.entropy.depletion enabled -- when we
1581 	 * only have partial entropy, an adversary may be able to
1582 	 * narrow the state of the pool down to a small number of
1583 	 * possibilities; the output then enables them to confirm a
1584 	 * guess, reducing its entropy from the adversary's perspective
1585 	 * to zero.
1586 	 *
1587 	 * This should only happen if the operator has chosen to
1588 	 * consolidate, either through sysctl kern.entropy.consolidate
1589 	 * or by writing less than full entropy to /dev/random as root
1590 	 * (which /dev/random promises will immediately affect
1591 	 * subsequent output, for better or worse).
1592 	 */
1593 	if (E->bitsneeded > 0 && E->samplesneeded > 0) {
1594 		if (__predict_false(E->epoch == (unsigned)-1) &&
1595 		    ratecheck(&lasttime, &interval)) {
1596 			printf("WARNING:"
1597 			    " system needs entropy for security;"
1598 			    " see entropy(7)\n");
1599 		}
1600 		atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
1601 		atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
1602 	}
1603 
1604 	/* Extract data from the pool, and `deplete' if we're doing that.  */
1605 	entpool_extract(&E->pool, buf, len);
1606 	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
1607 	    error == 0) {
1608 		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
1609 		unsigned bitsneeded = E->bitsneeded;
1610 		unsigned samplesneeded = E->samplesneeded;
1611 
1612 		bitsneeded += MIN(MINENTROPYBITS - bitsneeded, cost);
1613 		samplesneeded += MIN(MINSAMPLES - samplesneeded, cost);
1614 
1615 		atomic_store_relaxed(&E->bitsneeded, bitsneeded);
1616 		atomic_store_relaxed(&E->samplesneeded, samplesneeded);
1617 		entropy_deplete_evcnt.ev_count++;
1618 	}
1619 
1620 out:	/* Release the global lock and return the error.  */
1621 	if (__predict_false(cold))
1622 		splx(s);
1623 	else
1624 		mutex_exit(&E->lock);
1625 	return error;
1626 }
1627 
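/*
 * Illustrative sketch (an addition for exposition, not part of the
 * original file): an in-kernel consumer seeding a PRNG might call
 * entropy_extract roughly as below, accepting partial entropy rather
 * than blocking.  Unless ENTROPY_HARDFAIL is passed, the buffer is
 * still filled with the best available data even on error, as noted
 * above.
 *
 *	uint8_t seed[32];
 *	int error;
 *
 *	error = entropy_extract(seed, sizeof seed, 0);
 *	if (error)
 *		(arrange to reseed later, e.g. when the entropy
 *		epoch changes)
 *	(key the PRNG with seed)
 *	explicit_memset(seed, 0, sizeof seed);
 */
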
1628 /*
1629  * entropy_poll(events)
1630  *
1631  *	Return the subset of events that are ready, and if that is not
1632  *	all of them, record curlwp as waiting for entropy.
1633  */
1634 int
1635 entropy_poll(int events)
1636 {
1637 	int revents = 0;
1638 
1639 	KASSERT(!cold);
1640 
1641 	/* Always ready for writing.  */
1642 	revents |= events & (POLLOUT|POLLWRNORM);
1643 
1644 	/* Narrow it down to reads.  */
1645 	events &= POLLIN|POLLRDNORM;
1646 	if (events == 0)
1647 		return revents;
1648 
1649 	/*
1650 	 * If we have reached full entropy and we're not depleting
1651 	 * entropy, we are forever ready.
1652 	 */
1653 	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0 ||
1654 		atomic_load_relaxed(&E->samplesneeded) == 0) &&
1655 	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
1656 		return revents | events;
1657 
1658 	/*
1659 	 * Otherwise, check whether we need entropy under the lock.  If
1660 	 * we don't, we're ready; if we do, add ourselves to the queue.
1661 	 */
1662 	mutex_enter(&E->lock);
1663 	if (E->bitsneeded == 0 || E->samplesneeded == 0)
1664 		revents |= events;
1665 	else
1666 		selrecord(curlwp, &E->selq);
1667 	mutex_exit(&E->lock);
1668 
1669 	return revents;
1670 }
1671 
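/*
 * Illustrative sketch (an addition for exposition, not part of the
 * original file): this is the logic behind poll(2) reporting the
 * random devices ready for reading only once the system has full
 * entropy; a userland program waiting on an fd open on /dev/random
 * might do (error handling elided):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, INFTIM);
 */
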
1672 /*
1673  * filt_entropy_read_detach(kn)
1674  *
1675  *	struct filterops::f_detach callback for entropy read events:
1676  *	remove kn from the list of waiters.
1677  */
1678 static void
1679 filt_entropy_read_detach(struct knote *kn)
1680 {
1681 
1682 	KASSERT(!cold);
1683 
1684 	mutex_enter(&E->lock);
1685 	selremove_knote(&E->selq, kn);
1686 	mutex_exit(&E->lock);
1687 }
1688 
1689 /*
1690  * filt_entropy_read_event(kn, hint)
1691  *
1692  *	struct filterops::f_event callback for entropy read events:
1693  *	poll for entropy.  Caller must hold the global entropy lock if
1694  *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
1695  */
1696 static int
1697 filt_entropy_read_event(struct knote *kn, long hint)
1698 {
1699 	int ret;
1700 
1701 	KASSERT(!cold);
1702 
1703 	/* Acquire the lock, if caller is outside entropy subsystem.  */
1704 	if (hint == NOTE_SUBMIT)
1705 		KASSERT(mutex_owned(&E->lock));
1706 	else
1707 		mutex_enter(&E->lock);
1708 
1709 	/*
1710 	 * If we still need entropy, can't read anything; if not, can
1711 	 * read arbitrarily much.
1712 	 */
1713 	if (E->bitsneeded != 0 && E->samplesneeded != 0) {
1714 		ret = 0;
1715 	} else {
1716 		if (atomic_load_relaxed(&entropy_depletion))
1717 			kn->kn_data = ENTROPY_CAPACITY; /* bytes */
1718 		else
1719 			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
1720 		ret = 1;
1721 	}
1722 
1723 	/* Release the lock, if caller is outside entropy subsystem.  */
1724 	if (hint == NOTE_SUBMIT)
1725 		KASSERT(mutex_owned(&E->lock));
1726 	else
1727 		mutex_exit(&E->lock);
1728 
1729 	return ret;
1730 }
1731 
1732 /* XXX Makes sense only for /dev/u?random.  */
1733 static const struct filterops entropy_read_filtops = {
1734 	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
1735 	.f_attach = NULL,
1736 	.f_detach = filt_entropy_read_detach,
1737 	.f_event = filt_entropy_read_event,
1738 };
1739 
1740 /*
1741  * entropy_kqfilter(kn)
1742  *
1743  *	Register kn to receive entropy event notifications.  May be
1744  *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
1745  */
1746 int
1747 entropy_kqfilter(struct knote *kn)
1748 {
1749 
1750 	KASSERT(!cold);
1751 
1752 	switch (kn->kn_filter) {
1753 	case EVFILT_READ:
1754 		/* Enter into the global select queue.  */
1755 		mutex_enter(&E->lock);
1756 		kn->kn_fop = &entropy_read_filtops;
1757 		selrecord_knote(&E->selq, kn);
1758 		mutex_exit(&E->lock);
1759 		return 0;
1760 	case EVFILT_WRITE:
1761 		/* Can always dump entropy into the system.  */
1762 		kn->kn_fop = &seltrue_filtops;
1763 		return 0;
1764 	default:
1765 		return EINVAL;
1766 	}
1767 }
1768 
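/*
 * Illustrative sketch (an addition for exposition, not part of the
 * original file): a userland program could equally wait for entropy
 * with kqueue(2), assuming fd is open on /dev/random (error handling
 * elided):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for readiness)
 */
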
1769 /*
1770  * rndsource_setcb(rs, get, getarg)
1771  *
1772  *	Set the request callback for the entropy source rs, if it can
1773  *	provide entropy on demand.  Must precede rnd_attach_source.
1774  */
1775 void
1776 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
1777     void *getarg)
1778 {
1779 
1780 	rs->get = get;
1781 	rs->getarg = getarg;
1782 }
1783 
1784 /*
1785  * rnd_attach_source(rs, name, type, flags)
1786  *
1787  *	Attach the entropy source rs.  Must be done after
1788  *	rndsource_setcb, if any, and before any calls to rnd_add_data.
1789  */
1790 void
1791 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
1792     uint32_t flags)
1793 {
1794 	uint32_t extra[4];
1795 	unsigned i = 0;
1796 
1797 	KASSERTMSG(name[0] != '\0', "rndsource must have nonempty name");
1798 
1799 	/* Grab cycle counter to mix extra into the pool.  */
1800 	extra[i++] = entropy_timer();
1801 
1802 	/*
1803 	 * Apply some standard flags:
1804 	 *
1805 	 * - We do not bother with network devices by default, for
1806 	 *   hysterical raisins (perhaps: because it is often the case
1807 	 *   that an adversary can influence network packet timings).
1808 	 */
1809 	switch (type) {
1810 	case RND_TYPE_NET:
1811 		flags |= RND_FLAG_NO_COLLECT;
1812 		break;
1813 	}
1814 
1815 	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
1816 	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
1817 
1818 	/* Initialize the random source.  */
1819 	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
1820 	strlcpy(rs->name, name, sizeof(rs->name));
1821 	memset(&rs->time_delta, 0, sizeof(rs->time_delta));
1822 	memset(&rs->value_delta, 0, sizeof(rs->value_delta));
1823 	rs->total = 0;
1824 	rs->type = type;
1825 	rs->flags = flags;
1826 	if (entropy_percpu != NULL)
1827 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
1828 	extra[i++] = entropy_timer();
1829 
1830 	/* Wire it into the global list of random sources.  */
1831 	if (__predict_true(!cold))
1832 		mutex_enter(&E->lock);
1833 	LIST_INSERT_HEAD(&E->sources, rs, list);
1834 	if (__predict_true(!cold))
1835 		mutex_exit(&E->lock);
1836 	extra[i++] = entropy_timer();
1837 
1838 	/* Request that it provide entropy ASAP, if we can.  */
1839 	if (ISSET(flags, RND_FLAG_HASCB))
1840 		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
1841 	extra[i++] = entropy_timer();
1842 
1843 	/* Mix the extra into the pool.  */
1844 	KASSERT(i == __arraycount(extra));
1845 	entropy_enter(extra, sizeof extra, 0, /*count*/__predict_true(!cold));
1846 	explicit_memset(extra, 0, sizeof extra);
1847 }
1848 
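/*
 * Illustrative sketch (an addition for exposition, not part of the
 * original file): a hypothetical driver foo(4) with an on-demand
 * hardware RNG might register itself roughly like this, with the
 * rndsource_setcb call preceding rnd_attach_source as required
 * (foo_softc, foo_get, and foo_read_rng are made-up names):
 *
 *	static void
 *	foo_get(size_t nbytes, void *cookie)
 *	{
 *		struct foo_softc *sc = cookie;
 *		uint8_t buf[32];
 *		size_t n;
 *
 *		n = foo_read_rng(sc, buf, MIN(nbytes, sizeof buf));
 *		rnd_add_data(&sc->sc_rndsource, buf, n, n*NBBY);
 *		explicit_memset(buf, 0, sizeof buf);
 *	}
 *
 *	...
 *	rndsource_setcb(&sc->sc_rndsource, foo_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, "foo", RND_TYPE_RNG,
 *	    RND_FLAG_DEFAULT|RND_FLAG_HASCB);
 */
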
1849 /*
1850  * rnd_detach_source(rs)
1851  *
1852  *	Detach the entropy source rs.  May sleep waiting for users to
1853  *	drain.  Further use is not allowed.
1854  */
1855 void
1856 rnd_detach_source(struct krndsource *rs)
1857 {
1858 
1859 	/*
1860 	 * If we're cold (shouldn't happen, but hey), just remove it
1861 	 * from the list -- there's nothing allocated.
1862 	 */
1863 	if (__predict_false(cold) && entropy_percpu == NULL) {
1864 		LIST_REMOVE(rs, list);
1865 		return;
1866 	}
1867 
1868 	/* We may have to wait for entropy_request.  */
1869 	ASSERT_SLEEPABLE();
1870 
1871 	/* Wait until the source list is not in use, and remove it.  */
1872 	mutex_enter(&E->lock);
1873 	while (E->sourcelock)
1874 		cv_wait(&E->sourcelock_cv, &E->lock);
1875 	LIST_REMOVE(rs, list);
1876 	mutex_exit(&E->lock);
1877 
1878 	/* Free the per-CPU data.  */
1879 	percpu_free(rs->state, sizeof(struct rndsource_cpu));
1880 }
1881 
1882 /*
1883  * rnd_lock_sources(flags)
1884  *
1885  *	Lock the list of entropy sources.  Caller must hold the global
1886  *	entropy lock.  If successful, no rndsource will go away until
1887  *	rnd_unlock_sources even while the caller releases the global
1888  *	entropy lock.
1889  *
1890  *	May be called very early at boot, before entropy_init.
1891  *
1892  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1893  *	If flags & ENTROPY_SIG, allow interruption by signal.
1894  */
1895 static int __attribute__((warn_unused_result))
1896 rnd_lock_sources(int flags)
1897 {
1898 	int error;
1899 
1900 	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1901 	KASSERT(!cpu_intr_p());
1902 
1903 	while (E->sourcelock) {
1904 		KASSERT(!cold);
1905 		if (!ISSET(flags, ENTROPY_WAIT))
1906 			return EWOULDBLOCK;
1907 		if (ISSET(flags, ENTROPY_SIG)) {
1908 			error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
1909 			if (error)
1910 				return error;
1911 		} else {
1912 			cv_wait(&E->sourcelock_cv, &E->lock);
1913 		}
1914 	}
1915 
1916 	E->sourcelock = curlwp;
1917 	return 0;
1918 }
1919 
1920 /*
1921  * rnd_unlock_sources()
1922  *
1923  *	Unlock the list of sources after rnd_lock_sources.  Caller must
1924  *	hold the global entropy lock.
1925  *
1926  *	May be called very early at boot, before entropy_init.
1927  */
1928 static void
1929 rnd_unlock_sources(void)
1930 {
1931 
1932 	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1933 	KASSERT(!cpu_intr_p());
1934 
1935 	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
1936 	    curlwp, E->sourcelock);
1937 	E->sourcelock = NULL;
1938 	if (__predict_true(!cold))
1939 		cv_signal(&E->sourcelock_cv);
1940 }
1941 
1942 /*
1943  * rnd_sources_locked()
1944  *
1945  *	True if we hold the list of rndsources locked, for diagnostic
1946  *	assertions.
1947  *
1948  *	May be called very early at boot, before entropy_init.
1949  */
1950 static bool __diagused
1951 rnd_sources_locked(void)
1952 {
1953 
1954 	return E->sourcelock == curlwp;
1955 }
1956 
1957 /*
1958  * entropy_request(nbytes, flags)
1959  *
1960  *	Request nbytes bytes of entropy from all sources in the system.
1961  *	OK if we overdo it.  Caller must hold the global entropy lock;
1962  *	will release and re-acquire it.
1963  *
1964  *	May be called very early at boot, before entropy_init.
1965  *
1966  *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1967  *	If flags & ENTROPY_SIG, allow interruption by signal.
1968  */
1969 static int
1970 entropy_request(size_t nbytes, int flags)
1971 {
1972 	struct krndsource *rs;
1973 	int error;
1974 
1975 	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1976 	KASSERT(!cpu_intr_p());
1977 	if ((flags & ENTROPY_WAIT) != 0 && __predict_false(!cold))
1978 		ASSERT_SLEEPABLE();
1979 
1980 	/*
1981 	 * Lock the list of entropy sources to block rnd_detach_source
1982 	 * until we're done, and to serialize calls to the entropy
1983 	 * callbacks as guaranteed to drivers.
1984 	 */
1985 	error = rnd_lock_sources(flags);
1986 	if (error)
1987 		return error;
1988 	entropy_request_evcnt.ev_count++;
1989 
1990 	/* Clamp to the maximum reasonable request.  */
1991 	nbytes = MIN(nbytes, ENTROPY_CAPACITY);
1992 
1993 	/* Walk the list of sources.  */
1994 	LIST_FOREACH(rs, &E->sources, list) {
1995 		/* Skip sources without callbacks.  */
1996 		if (!ISSET(rs->flags, RND_FLAG_HASCB))
1997 			continue;
1998 
1999 		/*
2000 		 * Skip sources that are disabled altogether -- we
2001 		 * would just ignore their samples anyway.
2002 		 */
2003 		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
2004 			continue;
2005 
2006 		/* Drop the lock while we call the callback.  */
2007 		if (__predict_true(!cold))
2008 			mutex_exit(&E->lock);
2009 		(*rs->get)(nbytes, rs->getarg);
2010 		if (__predict_true(!cold))
2011 			mutex_enter(&E->lock);
2012 	}
2013 
2014 	/* Request done; unlock the list of entropy sources.  */
2015 	rnd_unlock_sources();
2016 	return 0;
2017 }
2018 
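/*
 * rnd_delta_estimate(d, v, delta)
 *
 *	Update the delta estimator state d with the new sample value v
 *	and its first-order delta, and return 1 if the first-, second-,
 *	and third-order deltas are all nonzero -- i.e., the sample
 *	plausibly carries entropy -- or 0 otherwise.
 */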
2019 static inline uint32_t
2020 rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
2021 {
2022 	int32_t delta2, delta3;
2023 
2024 	/*
2025 	 * Calculate the second and third order differentials
2026 	 */
2027 	delta2 = d->dx - delta;
2028 	if (delta2 < 0)
2029 		delta2 = -delta2; /* XXX arithmetic overflow */
2030 
2031 	delta3 = d->d2x - delta2;
2032 	if (delta3 < 0)
2033 		delta3 = -delta3; /* XXX arithmetic overflow */
2034 
2035 	d->x = v;
2036 	d->dx = delta;
2037 	d->d2x = delta2;
2038 
2039 	/*
2040 	 * If any delta is 0, we got no entropy.  If all are non-zero, we
2041 	 * might have something.
2042 	 */
2043 	if (delta == 0 || delta2 == 0 || delta3 == 0)
2044 		return 0;
2045 
2046 	return 1;
2047 }
2048 
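/*
 * rnd_dt_estimate(rs, t)
 *
 *	Compute the difference between the timestamp t and the previous
 *	timestamp in rs's per-CPU state, allowing for 32-bit wraparound,
 *	and apply rnd_delta_estimate to decide whether to count this
 *	timing sample as carrying entropy.
 */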
2049 static inline uint32_t
2050 rnd_dt_estimate(struct krndsource *rs, uint32_t t)
2051 {
2052 	int32_t delta;
2053 	uint32_t ret;
2054 	rnd_delta_t *d;
2055 	struct rndsource_cpu *rc;
2056 
2057 	rc = percpu_getref(rs->state);
2058 	d = &rc->rc_timedelta;
2059 
2060 	if (t < d->x) {
2061 		delta = UINT32_MAX - d->x + t;
2062 	} else {
2063 		delta = d->x - t;
2064 	}
2065 
2066 	if (delta < 0) {
2067 		delta = -delta;	/* XXX arithmetic overflow */
2068 	}
2069 
2070 	ret = rnd_delta_estimate(d, t, delta);
2071 
2072 	KASSERT(d->x == t);
2073 	KASSERT(d->dx == delta);
2074 	percpu_putref(rs->state);
2075 	return ret;
2076 }
2077 
2078 /*
2079  * rnd_add_uint32(rs, value)
2080  *
2081  *	Enter 32 bits of data from an entropy source into the pool.
2082  *
2083  *	May be called from any context or with spin locks held, but may
2084  *	drop data.
2085  *
2086  *	This is meant for cheaply taking samples from devices that
2087  *	aren't designed to be hardware random number generators.
2088  */
2089 void
2090 rnd_add_uint32(struct krndsource *rs, uint32_t value)
2091 {
2092 	bool intr_p = true;
2093 
2094 	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
2095 }
2096 
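/*
 * _rnd_add_uint32(rs, value)
 *
 *	Same as rnd_add_uint32: enter 32 bits of data with no entropy
 *	estimate attributed to the value; may be called from any
 *	context, but may drop data.
 */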
2097 void
2098 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
2099 {
2100 	bool intr_p = true;
2101 
2102 	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
2103 }
2104 
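/*
 * _rnd_add_uint64(rs, value)
 *
 *	Same as _rnd_add_uint32, but for a 64-bit value.
 */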
2105 void
2106 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
2107 {
2108 	bool intr_p = true;
2109 
2110 	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
2111 }
2112 
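/*
 * Illustrative sketch (an addition for exposition, not part of the
 * original file): a hypothetical driver with no hardware RNG can
 * still contribute cheap samples from its interrupt handler; the
 * value is credited with zero bits of entropy, and only the interrupt
 * timing is (optionally) estimated (foo_softc, foo_intr, and
 * foo_read_status are made-up names):
 *
 *	static int
 *	foo_intr(void *cookie)
 *	{
 *		struct foo_softc *sc = cookie;
 *
 *		rnd_add_uint32(&sc->sc_rndsource, foo_read_status(sc));
 *		...
 *		return 1;
 *	}
 */
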
2113 /*
2114  * rnd_add_data(rs, buf, len, entropybits)
2115  *
2116  *	Enter data from an entropy source into the pool, with a
2117  *	driver's estimate of how much entropy the physical source of
2118  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
2119  *	estimate and treat it as zero.
2120  *
2121  *	rs MAY but SHOULD NOT be NULL.  If rs is NULL, MUST NOT be
2122  *	called from interrupt context or with spin locks held.
2123  *
2124  *	If rs is non-NULL, MAY but SHOULD NOT be called from interrupt
2125  *	context, in which case act like rnd_add_data_intr -- if the
2126  *	sample buffer is full, schedule a softint and drop any
2127  *	additional data on the floor.  (This may change later once we
2128  *	fix drivers that still call this from interrupt context to use
2129  *	rnd_add_data_intr instead.)  MUST NOT be called with spin locks
2130  *	held if not in hard interrupt context -- i.e., MUST NOT be
2131  *	called in thread context or softint context with spin locks
2132  *	held.
2133  */
2134 void
2135 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
2136     uint32_t entropybits)
2137 {
2138 	bool intr_p = cpu_intr_p(); /* XXX make this unconditionally false */
2139 
2140 	/*
2141 	 * Weird legacy exception that we should rip out and replace by
2142 	 * creating new rndsources to attribute entropy to the callers:
2143 	 * If there's no rndsource, just enter the data and time now.
2144 	 */
2145 	if (rs == NULL) {
2146 		uint32_t extra;
2147 
2148 		KASSERT(!intr_p);
2149 		KASSERTMSG(howmany(entropybits, NBBY) <= len,
2150 		    "%s: impossible entropy rate:"
2151 		    " %"PRIu32" bits in %"PRIu32"-byte string",
2152 		    rs ? rs->name : "(anonymous)", entropybits, len);
2153 		entropy_enter(buf, len, entropybits, /*count*/false);
2154 		extra = entropy_timer();
2155 		entropy_enter(&extra, sizeof extra, 0, /*count*/false);
2156 		explicit_memset(&extra, 0, sizeof extra);
2157 		return;
2158 	}
2159 
2160 	rnd_add_data_internal(rs, buf, len, entropybits, intr_p);
2161 }
2162 
2163 /*
2164  * rnd_add_data_intr(rs, buf, len, entropybits)
2165  *
2166  *	Try to enter data from an entropy source into the pool, with a
2167  *	driver's estimate of how much entropy the physical source of
2168  *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
2169  *	estimate and treat it as zero.  If the sample buffer is full,
2170  *	schedule a softint and drop any additional data on the floor.
2171  */
2172 void
2173 rnd_add_data_intr(struct krndsource *rs, const void *buf, uint32_t len,
2174     uint32_t entropybits)
2175 {
2176 	bool intr_p = true;
2177 
2178 	rnd_add_data_internal(rs, buf, len, entropybits, intr_p);
2179 }
2180 
2181 /*
2182  * rnd_add_data_internal(rs, buf, len, entropybits, intr_p)
2183  *
2184  *	Internal subroutine to decide whether or not to enter data or
2185  *	timing for a particular rndsource, and if so, to enter it.
2186  *
2187  *	intr_p is true for callers from interrupt context or spin locks
2188  *	held, and false for callers from thread or soft interrupt
2189  *	context and no spin locks held.
2190  */
2191 static void
2192 rnd_add_data_internal(struct krndsource *rs, const void *buf, uint32_t len,
2193     uint32_t entropybits, bool intr_p)
2194 {
2195 	uint32_t flags;
2196 
2197 	KASSERTMSG(howmany(entropybits, NBBY) <= len,
2198 	    "%s: impossible entropy rate:"
2199 	    " %"PRIu32" bits in %"PRIu32"-byte string",
2200 	    rs ? rs->name : "(anonymous)", entropybits, len);
2201 
2202 	/*
2203 	 * Hold up the reset xcall before it zeroes the entropy counts
2204 	 * on this CPU or globally.  Otherwise, we might leave some
2205 	 * nonzero entropy attributed to an untrusted source in the
2206 	 * event of a race with a change to flags.
2207 	 */
2208 	kpreempt_disable();
2209 
2210 	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
2211 	flags = atomic_load_relaxed(&rs->flags);
2212 
2213 	/*
2214 	 * Skip if:
2215 	 * - we're not collecting entropy, or
2216 	 * - the operator doesn't want to collect entropy from this, or
2217 	 * - neither data nor timings are being collected from this.
2218 	 */
2219 	if (!atomic_load_relaxed(&entropy_collection) ||
2220 	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
2221 	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
2222 		goto out;
2223 
2224 	/* If asked, ignore the estimate.  */
2225 	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
2226 		entropybits = 0;
2227 
2228 	/* If we are collecting data, enter them.  */
2229 	if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) {
2230 		rnd_add_data_1(rs, buf, len, entropybits, /*count*/false,
2231 		    RND_FLAG_COLLECT_VALUE, intr_p);
2232 	}
2233 
2234 	/* If we are collecting timings, enter one.  */
2235 	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
2236 		uint32_t extra;
2237 		bool count;
2238 
2239 		/* Sample a timer.  */
2240 		extra = entropy_timer();
2241 
2242 		/* If asked, do entropy estimation on the time.  */
2243 		if ((flags & (RND_FLAG_ESTIMATE_TIME|RND_FLAG_NO_ESTIMATE)) ==
2244 		    RND_FLAG_ESTIMATE_TIME && __predict_true(!cold))
2245 			count = rnd_dt_estimate(rs, extra);
2246 		else
2247 			count = false;
2248 
2249 		rnd_add_data_1(rs, &extra, sizeof extra, 0, count,
2250 		    RND_FLAG_COLLECT_TIME, intr_p);
2251 	}
2252 
2253 out:	/* Allow concurrent changes to flags to finish.  */
2254 	kpreempt_enable();
2255 }
2256 
2257 static unsigned
2258 add_sat(unsigned a, unsigned b)
2259 {
2260 	unsigned c = a + b;
2261 
2262 	return (c < a ? UINT_MAX : c);
2263 }
2264 
2265 /*
2266  * rnd_add_data_1(rs, buf, len, entropybits, count, flag)
2267  *
2268  *	Internal subroutine to call either entropy_enter_intr, if we're
2269  *	in interrupt context, or entropy_enter if not, and to count the
2270  *	entropy in an rndsource.
2271  */
2272 static void
2273 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
2274     uint32_t entropybits, bool count, uint32_t flag, bool intr_p)
2275 {
2276 	bool fullyused;
2277 
2278 	/*
2279 	 * For the interrupt-like path, use entropy_enter_intr and take
2280 	 * note of whether it consumed the full sample; otherwise, use
2281 	 * entropy_enter, which always consumes the full sample.
2282 	 */
2283 	if (intr_p) {
2284 		fullyused = entropy_enter_intr(buf, len, entropybits, count);
2285 	} else {
2286 		entropy_enter(buf, len, entropybits, count);
2287 		fullyused = true;
2288 	}
2289 
2290 	/*
2291 	 * If we used the full sample, note how many bits were
2292 	 * contributed from this source.
2293 	 */
2294 	if (fullyused) {
2295 		if (__predict_false(cold)) {
2296 			const int s = splhigh();
2297 			rs->total = add_sat(rs->total, entropybits);
2298 			switch (flag) {
2299 			case RND_FLAG_COLLECT_TIME:
2300 				rs->time_delta.insamples =
2301 				    add_sat(rs->time_delta.insamples, 1);
2302 				break;
2303 			case RND_FLAG_COLLECT_VALUE:
2304 				rs->value_delta.insamples =
2305 				    add_sat(rs->value_delta.insamples, 1);
2306 				break;
2307 			}
2308 			splx(s);
2309 		} else {
2310 			struct rndsource_cpu *rc = percpu_getref(rs->state);
2311 
2312 			atomic_store_relaxed(&rc->rc_entropybits,
2313 			    add_sat(rc->rc_entropybits, entropybits));
2314 			switch (flag) {
2315 			case RND_FLAG_COLLECT_TIME:
2316 				atomic_store_relaxed(&rc->rc_timesamples,
2317 				    add_sat(rc->rc_timesamples, 1));
2318 				break;
2319 			case RND_FLAG_COLLECT_VALUE:
2320 				atomic_store_relaxed(&rc->rc_datasamples,
2321 				    add_sat(rc->rc_datasamples, 1));
2322 				break;
2323 			}
2324 			percpu_putref(rs->state);
2325 		}
2326 	}
2327 }
2328 
2329 /*
2330  * rnd_add_data_sync(rs, buf, len, entropybits)
2331  *
2332  *	Same as rnd_add_data.  Originally used in rndsource callbacks,
2333  *	to break an unnecessary cycle; no longer really needed.
2334  */
2335 void
2336 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
2337     uint32_t entropybits)
2338 {
2339 
2340 	rnd_add_data(rs, buf, len, entropybits);
2341 }
2342 
2343 /*
2344  * rndsource_entropybits(rs)
2345  *
2346  *	Return approximately the number of bits of entropy that have
2347  *	been contributed via rs so far.  Approximate if other CPUs may
2348  *	be calling rnd_add_data concurrently.
2349  */
2350 static unsigned
2351 rndsource_entropybits(struct krndsource *rs)
2352 {
2353 	unsigned nbits = rs->total;
2354 
2355 	KASSERT(!cold);
2356 	KASSERT(rnd_sources_locked());
2357 	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
2358 	return nbits;
2359 }
2360 
2361 static void
2362 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2363 {
2364 	struct rndsource_cpu *rc = ptr;
2365 	unsigned *nbitsp = cookie;
2366 	unsigned cpu_nbits;
2367 
2368 	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
2369 	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
2370 }
2371 
2372 /*
2373  * rndsource_to_user(rs, urs)
2374  *
2375  *	Copy a description of rs out to urs for userland.
2376  */
2377 static void
2378 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
2379 {
2380 
2381 	KASSERT(!cold);
2382 	KASSERT(rnd_sources_locked());
2383 
2384 	/* Avoid kernel memory disclosure.  */
2385 	memset(urs, 0, sizeof(*urs));
2386 
2387 	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
2388 	strlcpy(urs->name, rs->name, sizeof(urs->name));
2389 	urs->total = rndsource_entropybits(rs);
2390 	urs->type = rs->type;
2391 	urs->flags = atomic_load_relaxed(&rs->flags);
2392 }
2393 
2394 /*
2395  * rndsource_to_user_est(rs, urse)
2396  *
2397  *	Copy a description of rs and estimation statistics out to urse
2398  *	for userland.
2399  */
2400 static void
2401 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
2402 {
2403 
2404 	KASSERT(!cold);
2405 	KASSERT(rnd_sources_locked());
2406 
2407 	/* Avoid kernel memory disclosure.  */
2408 	memset(urse, 0, sizeof(*urse));
2409 
2410 	/* Copy out the rndsource description.  */
2411 	rndsource_to_user(rs, &urse->rt);
2412 
2413 	/* Gather the statistics.  */
2414 	urse->dt_samples = rs->time_delta.insamples;
2415 	urse->dt_total = 0;
2416 	urse->dv_samples = rs->value_delta.insamples;
2417 	urse->dv_total = urse->rt.total;
2418 	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
2419 }
2420 
2421 static void
2422 rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2423 {
2424 	struct rndsource_cpu *rc = ptr;
2425 	rndsource_est_t *urse = cookie;
2426 
2427 	urse->dt_samples = add_sat(urse->dt_samples,
2428 	    atomic_load_relaxed(&rc->rc_timesamples));
2429 	urse->dv_samples = add_sat(urse->dv_samples,
2430 	    atomic_load_relaxed(&rc->rc_datasamples));
2431 }
2432 
2433 /*
2434  * entropy_reset_xc(arg1, arg2)
2435  *
2436  *	Reset the current CPU's pending entropy to zero.
2437  */
2438 static void
2439 entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
2440 {
2441 	uint32_t extra = entropy_timer();
2442 	struct entropy_cpu_lock lock;
2443 	struct entropy_cpu *ec;
2444 
2445 	/*
2446 	 * With the per-CPU state locked, zero the pending count and
2447 	 * enter a cycle count for fun.
2448 	 */
2449 	ec = entropy_cpu_get(&lock);
2450 	ec->ec_bitspending = 0;
2451 	ec->ec_samplespending = 0;
2452 	entpool_enter(ec->ec_pool, &extra, sizeof extra);
2453 	entropy_cpu_put(&lock, ec);
2454 }
2455 
2456 /*
2457  * entropy_reset()
2458  *
2459  *	Assume the entropy pool has been exposed, e.g. because the VM
2460  *	has been cloned.  Nix all the pending entropy and set the
2461  *	needed to maximum.
2462  */
2463 void
2464 entropy_reset(void)
2465 {
2466 
2467 	xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
2468 	mutex_enter(&E->lock);
2469 	E->bitspending = 0;
2470 	E->samplespending = 0;
2471 	atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
2472 	atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
2473 	E->consolidate = false;
2474 	mutex_exit(&E->lock);
2475 }
2476 
2477 /*
2478  * entropy_ioctl(cmd, data)
2479  *
2480  *	Handle various /dev/random ioctl queries.
2481  */
2482 int
2483 entropy_ioctl(unsigned long cmd, void *data)
2484 {
2485 	struct krndsource *rs;
2486 	bool privileged;
2487 	int error;
2488 
2489 	KASSERT(!cold);
2490 
2491 	/* Verify user's authorization to perform the ioctl.  */
2492 	switch (cmd) {
2493 	case RNDGETENTCNT:
2494 	case RNDGETPOOLSTAT:
2495 	case RNDGETSRCNUM:
2496 	case RNDGETSRCNAME:
2497 	case RNDGETESTNUM:
2498 	case RNDGETESTNAME:
2499 		error = kauth_authorize_device(kauth_cred_get(),
2500 		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
2501 		break;
2502 	case RNDCTL:
2503 		error = kauth_authorize_device(kauth_cred_get(),
2504 		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
2505 		break;
2506 	case RNDADDDATA:
2507 		error = kauth_authorize_device(kauth_cred_get(),
2508 		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
2509 		/* Ascertain whether the user's inputs should be counted.  */
2510 		if (kauth_authorize_device(kauth_cred_get(),
2511 			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
2512 			NULL, NULL, NULL, NULL) == 0)
2513 			privileged = true;
2514 		break;
2515 	default: {
2516 		/*
2517 		 * XXX Hack to avoid changing module ABI so this can be
2518 		 * pulled up.  Later, we can just remove the argument.
2519 		 */
2520 		static const struct fileops fops = {
2521 			.fo_ioctl = rnd_system_ioctl,
2522 		};
2523 		struct file f = {
2524 			.f_ops = &fops,
2525 		};
2526 		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
2527 		    enosys(), error);
2528 #if defined(_LP64)
2529 		if (error == ENOSYS)
2530 			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
2531 			    enosys(), error);
2532 #endif
2533 		if (error == ENOSYS)
2534 			error = ENOTTY;
2535 		break;
2536 	}
2537 	}
2538 
2539 	/* If anything went wrong with authorization, stop here.  */
2540 	if (error)
2541 		return error;
2542 
2543 	/* Dispatch on the command.  */
2544 	switch (cmd) {
2545 	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
2546 		uint32_t *countp = data;
2547 
2548 		mutex_enter(&E->lock);
2549 		*countp = MINENTROPYBITS - E->bitsneeded;
2550 		mutex_exit(&E->lock);
2551 
2552 		break;
2553 	}
2554 	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
2555 		rndpoolstat_t *pstat = data;
2556 
2557 		mutex_enter(&E->lock);
2558 
2559 		/* parameters */
2560 		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
2561 		pstat->threshold = MINENTROPYBITS/NBBY; /* bytes */
2562 		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
2563 
2564 		/* state */
2565 		pstat->added = 0; /* XXX total entropy_enter count */
2566 		pstat->curentropy = MINENTROPYBITS - E->bitsneeded; /* bits */
2567 		pstat->removed = 0; /* XXX total entropy_extract count */
2568 		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
2569 
2570 		/*
2571 		 * This used to be bits of data fabricated in some
2572 		 * sense; we'll take it to mean number of samples,
2573 		 * excluding the bits of entropy from HWRNG or seed.
2574 		 */
2575 		pstat->generated = MINSAMPLES - E->samplesneeded;
2576 		pstat->generated -= MIN(pstat->generated, pstat->curentropy);
2577 
2578 		mutex_exit(&E->lock);
2579 		break;
2580 	}
2581 	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
2582 		rndstat_t *stat = data;
2583 		uint32_t start = 0, i = 0;
2584 
2585 		/* Skip if none requested; fail if too many requested.  */
2586 		if (stat->count == 0)
2587 			break;
2588 		if (stat->count > RND_MAXSTATCOUNT)
2589 			return EINVAL;
2590 
2591 		/*
2592 		 * Under the lock, find the first one, copy out as many
2593 		 * as requested, and report how many we copied out.
2594 		 */
2595 		mutex_enter(&E->lock);
2596 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2597 		if (error) {
2598 			mutex_exit(&E->lock);
2599 			return error;
2600 		}
2601 		LIST_FOREACH(rs, &E->sources, list) {
2602 			if (start++ == stat->start)
2603 				break;
2604 		}
2605 		while (i < stat->count && rs != NULL) {
2606 			mutex_exit(&E->lock);
2607 			rndsource_to_user(rs, &stat->source[i++]);
2608 			mutex_enter(&E->lock);
2609 			rs = LIST_NEXT(rs, list);
2610 		}
2611 		KASSERT(i <= stat->count);
2612 		stat->count = i;
2613 		rnd_unlock_sources();
2614 		mutex_exit(&E->lock);
2615 		break;
2616 	}
2617 	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
2618 		rndstat_est_t *estat = data;
2619 		uint32_t start = 0, i = 0;
2620 
2621 		/* Skip if none requested; fail if too many requested.  */
2622 		if (estat->count == 0)
2623 			break;
2624 		if (estat->count > RND_MAXSTATCOUNT)
2625 			return EINVAL;
2626 
2627 		/*
2628 		 * Under the lock, find the first one, copy out as many
2629 		 * as requested, and report how many we copied out.
2630 		 */
2631 		mutex_enter(&E->lock);
2632 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2633 		if (error) {
2634 			mutex_exit(&E->lock);
2635 			return error;
2636 		}
2637 		LIST_FOREACH(rs, &E->sources, list) {
2638 			if (start++ == estat->start)
2639 				break;
2640 		}
2641 		while (i < estat->count && rs != NULL) {
2642 			mutex_exit(&E->lock);
2643 			rndsource_to_user_est(rs, &estat->source[i++]);
2644 			mutex_enter(&E->lock);
2645 			rs = LIST_NEXT(rs, list);
2646 		}
2647 		KASSERT(i <= estat->count);
2648 		estat->count = i;
2649 		rnd_unlock_sources();
2650 		mutex_exit(&E->lock);
2651 		break;
2652 	}
2653 	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
2654 		rndstat_name_t *nstat = data;
2655 		const size_t n = sizeof(rs->name);
2656 
2657 		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
2658 
2659 		/*
2660 		 * Under the lock, search by name.  If found, copy it
2661 		 * out; if not found, fail with ENOENT.
2662 		 */
2663 		mutex_enter(&E->lock);
2664 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2665 		if (error) {
2666 			mutex_exit(&E->lock);
2667 			return error;
2668 		}
2669 		LIST_FOREACH(rs, &E->sources, list) {
2670 			if (strncmp(rs->name, nstat->name, n) == 0)
2671 				break;
2672 		}
2673 		if (rs != NULL) {
2674 			mutex_exit(&E->lock);
2675 			rndsource_to_user(rs, &nstat->source);
2676 			mutex_enter(&E->lock);
2677 		} else {
2678 			error = ENOENT;
2679 		}
2680 		rnd_unlock_sources();
2681 		mutex_exit(&E->lock);
2682 		break;
2683 	}
2684 	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
2685 		rndstat_est_name_t *enstat = data;
2686 		const size_t n = sizeof(rs->name);
2687 
2688 		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
2689 
2690 		/*
2691 		 * Under the lock, search by name.  If found, copy it
2692 		 * out; if not found, fail with ENOENT.
2693 		 */
2694 		mutex_enter(&E->lock);
2695 		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2696 		if (error) {
2697 			mutex_exit(&E->lock);
2698 			return error;
2699 		}
2700 		LIST_FOREACH(rs, &E->sources, list) {
2701 			if (strncmp(rs->name, enstat->name, n) == 0)
2702 				break;
2703 		}
2704 		if (rs != NULL) {
2705 			mutex_exit(&E->lock);
2706 			rndsource_to_user_est(rs, &enstat->source);
2707 			mutex_enter(&E->lock);
2708 		} else {
2709 			error = ENOENT;
2710 		}
2711 		rnd_unlock_sources();
2712 		mutex_exit(&E->lock);
2713 		break;
2714 	}
2715 	case RNDCTL: {		/* Modify entropy source flags.  */
2716 		rndctl_t *rndctl = data;
2717 		const size_t n = sizeof(rs->name);
2718 		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2719 		uint32_t flags;
2720 		bool reset = false, request = false;
2721 
2722 		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
2723 
2724 		/* Whitelist the flags that the user can change.  */
2725 		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2726 
2727 		/*
2728 		 * For each matching rndsource, either by type if
2729 		 * specified or by name if not, set the masked flags.
2730 		 */
2731 		mutex_enter(&E->lock);
2732 		LIST_FOREACH(rs, &E->sources, list) {
2733 			if (rndctl->type != 0xff) {
2734 				if (rs->type != rndctl->type)
2735 					continue;
2736 			} else if (rndctl->name[0] != '\0') {
2737 				if (strncmp(rs->name, rndctl->name, n) != 0)
2738 					continue;
2739 			}
2740 			flags = rs->flags & ~rndctl->mask;
2741 			flags |= rndctl->flags & rndctl->mask;
2742 			if ((rs->flags & resetflags) == 0 &&
2743 			    (flags & resetflags) != 0)
2744 				reset = true;
2745 			if ((rs->flags ^ flags) & resetflags)
2746 				request = true;
2747 			atomic_store_relaxed(&rs->flags, flags);
2748 		}
2749 		mutex_exit(&E->lock);
2750 
2751 		/*
2752 		 * If we disabled estimation or collection, nix all the
2753 		 * pending entropy and set needed to the maximum.
2754 		 */
2755 		if (reset)
2756 			entropy_reset();
2757 
2758 		/*
2759 		 * If we changed any of the estimation or collection
2760 		 * flags, request new samples from everyone -- either
2761 		 * to make up for what we just lost, or to get new
2762 		 * samples from what we just added.
2763 		 *
2764 		 * Failing on signal, while waiting for another process
2765 		 * to finish requesting entropy, is OK here even though
2766 		 * we have committed side effects, because this ioctl
2767 		 * command is idempotent, so repeating it is safe.
2768 		 */
2769 		if (request)
2770 			error = entropy_gather();
2771 		break;
2772 	}
2773 	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
2774 		rnddata_t *rdata = data;
2775 		unsigned entropybits = 0;
2776 
2777 		if (!atomic_load_relaxed(&entropy_collection))
2778 			break;	/* thanks but no thanks */
2779 		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
2780 			return EINVAL;
2781 
2782 		/*
2783 		 * This ioctl serves as the userland alternative to a
2784 		 * bootloader-provided seed -- typically furnished by
2785 		 * /etc/rc.d/random_seed.  We accept the user's entropy
2786 		 * claim only if
2787 		 *
2788 		 * (a) the user is privileged, and
2789 		 * (b) we have not entered a bootloader seed.
2790 		 *
2791 		 * under the assumption that the user may use this to
2792 		 * load a seed from disk that we have already loaded
2793 		 * from the bootloader, so we don't double-count it.
2794 		 */
2795 		if (privileged && rdata->entropy && rdata->len) {
2796 			mutex_enter(&E->lock);
2797 			if (!E->seeded) {
2798 				entropybits = MIN(rdata->entropy,
2799 				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
2800 				E->seeded = true;
2801 			}
2802 			mutex_exit(&E->lock);
2803 		}
2804 
2805 		/* Enter the data and consolidate entropy.  */
2806 		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
2807 		    entropybits);
2808 		error = entropy_consolidate();
2809 		break;
2810 	}
2811 	default:
2812 		error = ENOTTY;
2813 	}
2814 
2815 	/* Return any error that may have come up.  */
2816 	return error;
2817 }
2818 
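/*
 * Illustrative sketch (an addition for exposition, not part of the
 * original file): the ioctls handled above are normally issued
 * against the random devices; for example, a userland program might
 * query the current entropy count like so (error handling elided,
 * definitions from <sys/rndio.h>):
 *
 *	uint32_t count;
 *	int fd = open("/dev/urandom", O_RDONLY);
 *
 *	ioctl(fd, RNDGETENTCNT, &count);
 *	printf("%" PRIu32 " bits of entropy available\n", count);
 */
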
2819 /* Legacy entry points */
2820 
2821 void
2822 rnd_seed(void *seed, size_t len)
2823 {
2824 
2825 	if (len != sizeof(rndsave_t)) {
2826 		printf("entropy: invalid seed length: %zu,"
2827 		    " expected sizeof(rndsave_t) = %zu\n",
2828 		    len, sizeof(rndsave_t));
2829 		return;
2830 	}
2831 	entropy_seed(seed);
2832 }
2833 
2834 void
2835 rnd_init(void)
2836 {
2837 
2838 	entropy_init();
2839 }
2840 
2841 void
2842 rnd_init_softint(void)
2843 {
2844 
2845 	entropy_init_late();
2846 	entropy_bootrequest();
2847 }
2848 
2849 int
2850 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
2851 {
2852 
2853 	return entropy_ioctl(cmd, data);
2854 }
2855