/*	$NetBSD: kern_entropy.c,v 1.72 2024/08/27 00:56:47 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
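 *	  (In an iterative-guessing attack, an adversary who can
 *	  observe outputs between small additions of entropy guesses
 *	  each small addition by brute force, so crediting partial
 *	  entropy piecemeal would never get ahead of the attacker.)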
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 *
 *	* While cold, a single global entropy pool is available for
 *	  entering and extracting, serialized through splhigh/splx.
 *	  The per-CPU entropy pool data structures are initialized in
 *	  entropy_init and entropy_init_late (separated mainly for
 *	  hysterical raisins at this point), but are not used until the
 *	  system is warm, at which point access to the global entropy
 *	  pool is limited to thread and softint context and serialized
 *	  by E->lock.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.72 2024/08/27 00:56:47 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/reboot.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

#define	MINENTROPYBYTES	ENTROPY_CAPACITY
#define	MINENTROPYBITS	(MINENTROPYBYTES*NBBY)
#define	MINSAMPLES	(2*MINENTROPYBITS)

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct entropy_cpu_evcnt {
		struct evcnt	softint;
		struct evcnt	intrdrop;
		struct evcnt	intrtrunc;
	}			*ec_evcnt;
	struct entpool		*ec_pool;
	unsigned		ec_bitspending;
	unsigned		ec_samplespending;
	bool			ec_locked;
};

/*
 * struct entropy_cpu_lock
 *
 *	State for locking the per-CPU entropy state.
 */
struct entropy_cpu_lock {
	int		ecl_s;
	long		ecl_pctr;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned	rc_entropybits;
	unsigned	rc_timesamples;
	unsigned	rc_datasamples;
	rnd_delta_t	rc_timedelta;
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	bitsneeded;	/* (A) needed globally */
	unsigned	bitspending;	/* pending in per-CPU pools */
	unsigned	samplesneeded;	/* (A) needed globally */
	unsigned	samplespending;	/* pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.  */
	.bitsneeded = MINENTROPYBITS,
	.samplesneeded = MINSAMPLES,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
};

#define	E	(&entropy_global)	/* declutter */

/* Read-mostly globals */
static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
static void		*entropy_sih __read_mostly; /* softint handler */
static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */

static struct krndsource seed_rndsource __read_mostly;

/*
 * Event counters
 *
 *	Must be careful with adding these because they can serve as
 *	side channels.
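 *	(A reader of the counters can learn when and how often entropy
 *	was gathered or credited.)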
 */
static struct evcnt entropy_discretionary_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
static struct evcnt entropy_immediate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
static struct evcnt entropy_partial_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
static struct evcnt entropy_consolidate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
static struct evcnt entropy_extract_fail_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
static struct evcnt entropy_request_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
EVCNT_ATTACH_STATIC(entropy_request_evcnt);
static struct evcnt entropy_deplete_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
static struct evcnt entropy_notify_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
EVCNT_ATTACH_STATIC(entropy_notify_evcnt);

/* Sysctl knobs */
static bool	entropy_collection = 1;
static bool	entropy_depletion = 0; /* Silly!  */

static const struct sysctlnode	*entropy_sysctlroot;
static struct sysctllog		*entropy_sysctllog;

/* Forward declarations */
static void	entropy_init_cpu(void *, void *, struct cpu_info *);
static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
static void	entropy_account_cpu(struct entropy_cpu *);
static void	entropy_enter(const void *, size_t, unsigned, bool);
static bool	entropy_enter_intr(const void *, size_t, unsigned, bool);
static void	entropy_softintr(void *);
static void	entropy_thread(void *);
static bool	entropy_pending(void);
static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
static void	entropy_do_consolidate(void);
static void	entropy_consolidate_xc(void *, void *);
static void	entropy_notify(void);
static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
static void	filt_entropy_read_detach(struct knote *);
static int	filt_entropy_read_event(struct knote *, long);
static int	entropy_request(size_t, int);
static void	rnd_add_data_internal(struct krndsource *, const void *,
		    uint32_t, uint32_t, bool);
static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
		    uint32_t, bool, uint32_t, bool);
static unsigned	rndsource_entropybits(struct krndsource *);
static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
static void	rndsource_to_user(struct krndsource *, rndsource_t *);
static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);

/*
 * entropy_timer()
 *
 *	Cycle counter, time counter, or anything that changes a wee bit
 *	unpredictably.
 */
static inline uint32_t
entropy_timer(void)
{
	struct bintime bt;
	uint32_t v;

	/* If we have a CPU cycle counter, use the low 32 bits.  */
#ifdef __HAVE_CPU_COUNTER
	if (__predict_true(cpu_hascounter()))
		return cpu_counter32();
#endif	/* __HAVE_CPU_COUNTER */

	/* If we're cold, tough.  Can't binuptime while cold.  */
	if (__predict_false(cold))
		return 0;

	/* Fold the 128 bits of binuptime into 32 bits.  */
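	/* (bt.sec and bt.frac are each 64 bits: XOR all four 32-bit halves.) */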
	binuptime(&bt);
	v = bt.frac;
	v ^= bt.frac >> 32;
	v ^= bt.sec;
	v ^= bt.sec >> 32;
	return v;
}

static void
attach_seed_rndsource(void)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * First called no later than entropy_init, while we are still
	 * single-threaded, so no need for RUN_ONCE.
	 */
	if (E->seed_rndsource)
		return;

	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
	E->seed_rndsource = true;
}

/*
 * entropy_init()
 *
 *	Initialize the entropy subsystem.  Panic on failure.
 *
 *	Requires percpu(9) and sysctl(9) to be initialized.  Must run
 *	while cold.
 */
static void
entropy_init(void)
{
	uint32_t extra[2];
	struct krndsource *rs;
	unsigned i = 0;

	KASSERT(cold);

	/* Grab some cycle counts early at boot.  */
	extra[i++] = entropy_timer();

	/* Run the entropy pool cryptography self-test.  */
	if (entpool_selftest() == -1)
		panic("entropy pool crypto self-test failed");

	/* Create the sysctl directory.  */
	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
	    SYSCTL_DESCR("Entropy (random number sources) options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	/* Create the sysctl knobs.  */
	/* XXX These shouldn't be writable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
	    SYSCTL_DESCR("Trigger entropy consolidation now"),
	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	/* XXX These should maybe not be readable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "needed",
	    SYSCTL_DESCR("Systemwide entropy deficit (bits of entropy)"),
	    NULL, 0, &E->bitsneeded, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "pending",
	    SYSCTL_DESCR("Number of bits of entropy pending on CPUs"),
	    NULL, 0, &E->bitspending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "samplesneeded",
	    SYSCTL_DESCR("Systemwide entropy deficit (samples)"),
	    NULL, 0, &E->samplesneeded, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "samplespending",
	    SYSCTL_DESCR("Number of samples pending on CPUs"),
	    NULL, 0, &E->samplespending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT,
	    "epoch", SYSCTL_DESCR("Entropy epoch"),
	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);

	/* Initialize the global state for multithreaded operation.  */
	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
	cv_init(&E->cv, "entropy");
	selinit(&E->selq);
	cv_init(&E->sourcelock_cv, "entsrclock");

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Note if the bootloader didn't provide a seed.  */
	if (!E->seeded)
		aprint_debug("entropy: no seed from bootloader\n");

	/* Allocate the per-CPU records for all early entropy sources.  */
	LIST_FOREACH(rs, &E->sources, list)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));

	/* Allocate and initialize the per-CPU state.  */
	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
	    entropy_init_cpu, entropy_fini_cpu, NULL);

	/* Enter the boot cycle count to get started.  */
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, /*nbits*/0, /*count*/false);
	explicit_memset(extra, 0, sizeof extra);
}

/*
 * entropy_init_late()
 *
 *	Late initialization.  Panic on failure.
 *
 *	Requires CPUs to have been detected and LWPs to have started.
 *	Must run while cold.
 */
static void
entropy_init_late(void)
{
	int error;

	KASSERT(cold);

	/*
	 * Establish the softint at the highest softint priority level.
	 * Must happen after CPU detection.
	 */
	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    &entropy_softintr, NULL);
	if (entropy_sih == NULL)
		panic("unable to establish entropy softint");

	/*
	 * Create the entropy housekeeping thread.  Must happen after
	 * lwpinit.
	 */
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
	    entropy_thread, NULL, &entropy_lwp, "entbutler");
	if (error)
		panic("unable to create entropy housekeeping thread: %d",
		    error);
}

/*
 * entropy_init_cpu(ptr, cookie, ci)
 *
 *	percpu(9) constructor for per-CPU entropy pool.
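 *
 *	May sleep to allocate memory (KM_SLEEP), so it runs in thread
 *	context when the per-CPU state is created, not from interrupt
 *	context.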
 */
static void
entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;
	const char *cpuname;

	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
	ec->ec_bitspending = 0;
	ec->ec_samplespending = 0;
	ec->ec_locked = false;

	/* XXX ci_cpuname may not be initialized early enough.  */
	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy softint");
	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy intrdrop");
	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
	    cpuname, "entropy intrtrunc");
}

/*
 * entropy_fini_cpu(ptr, cookie, ci)
 *
 *	percpu(9) destructor for per-CPU entropy pool.
 */
static void
entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;

	/*
	 * Zero any lingering data.  Disclosure of the per-CPU pool
	 * shouldn't retroactively affect the security of any keys
	 * generated, because entpool(9) erases whatever we have just
	 * drawn out of any pool, but better safe than sorry.
	 */
	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));

	evcnt_detach(&ec->ec_evcnt->intrtrunc);
	evcnt_detach(&ec->ec_evcnt->intrdrop);
	evcnt_detach(&ec->ec_evcnt->softint);

	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
}

/*
 * ec = entropy_cpu_get(&lock)
 * entropy_cpu_put(&lock, ec)
 *
 *	Lock and unlock the per-CPU entropy state.  This only prevents
 *	access on the same CPU -- by hard interrupts, by soft
 *	interrupts, or by other threads.
 *
 *	Blocks soft interrupts and preemption altogether; doesn't block
 *	hard interrupts, but causes samples in hard interrupts to be
 *	dropped.
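 *
 *	The lwp_pctr() snapshot taken on get is asserted unchanged on
 *	put, to catch any unexpected context switch while the per-CPU
 *	state is held.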
 */
static struct entropy_cpu *
entropy_cpu_get(struct entropy_cpu_lock *lock)
{
	struct entropy_cpu *ec;

	ec = percpu_getref(entropy_percpu);
	lock->ecl_s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	lock->ecl_pctr = lwp_pctr();
	__insn_barrier();

	return ec;
}

static void
entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
{

	KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
	KASSERT(ec->ec_locked);

	__insn_barrier();
	KASSERT(lock->ecl_pctr == lwp_pctr());
	ec->ec_locked = false;
	splx(lock->ecl_s);
	percpu_putref(entropy_percpu);
}

/*
 * entropy_seed(seed)
 *
 *	Seed the entropy pool with seed.  Meant to be called as early
 *	as possible by the bootloader; may be called before or after
 *	entropy_init.  Must be called before system reaches userland.
 *	Must be called in thread or soft interrupt context, not in hard
 *	interrupt context.  Must be called at most once.
 *
 *	Overwrites the seed in place.  Caller may then free the memory.
 */
static void
entropy_seed(rndsave_t *seed)
{
	SHA1_CTX ctx;
	uint8_t digest[SHA1_DIGEST_LENGTH];
	bool seeded;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * Verify the checksum.  If the checksum fails, take the data
	 * but ignore the entropy estimate -- the file may have been
	 * incompletely written with garbage, which is harmless to add
	 * but may not be as unpredictable as alleged.
	 */
	SHA1Init(&ctx);
	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
	SHA1Update(&ctx, seed->data, sizeof(seed->data));
	SHA1Final(digest, &ctx);
	CTASSERT(sizeof(seed->digest) == sizeof(digest));
	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
		printf("entropy: invalid seed checksum\n");
		seed->entropy = 0;
	}
	explicit_memset(&ctx, 0, sizeof ctx);
	explicit_memset(digest, 0, sizeof digest);

	/*
	 * If the entropy is insensibly large, try byte-swapping.
	 * Otherwise assume the file is corrupted and act as though it
	 * has zero entropy.
	 */
	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
		seed->entropy = bswap32(seed->entropy);
		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
			seed->entropy = 0;
	}

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Test and set E->seeded.  */
	seeded = E->seeded;
	E->seeded = (seed->entropy > 0);

	/*
	 * If we've been seeded, may be re-entering the same seed
	 * (e.g., bootloader vs module init, or something).  No harm in
	 * entering it twice, but it contributes no additional entropy.
	 */
	if (seeded) {
		printf("entropy: double-seeded by bootloader\n");
		seed->entropy = 0;
	} else {
		printf("entropy: entering seed from bootloader"
		    " with %u bits of entropy\n", (unsigned)seed->entropy);
	}

	/* Enter it into the pool and promptly zero it.  */
	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
	    seed->entropy);
	explicit_memset(seed, 0, sizeof(*seed));
}

/*
 * entropy_bootrequest()
 *
 *	Request entropy from all sources at boot, once config is
 *	complete and interrupts are running but we are still cold.
 */
void
entropy_bootrequest(void)
{
	int error;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(cold);

	/*
	 * Request enough to satisfy the maximum entropy shortage.
	 * This is harmless overkill if the bootloader provided a seed.
	 */
	error = entropy_request(MINENTROPYBYTES, ENTROPY_WAIT);
	KASSERTMSG(error == 0, "error=%d", error);
}

/*
 * entropy_epoch()
 *
 *	Returns the current entropy epoch.  If this changes, you should
 *	reseed.  If -1, means system entropy has not yet reached full
 *	entropy or been explicitly consolidated; never reverts back to
 *	-1.  Never zero, so you can always use zero as an uninitialized
 *	sentinel value meaning `reseed ASAP'.
 *
 *	Usage model:
 *
 *		struct foo {
 *			struct crypto_prng prng;
 *			unsigned epoch;
 *		} *foo;
 *
 *		unsigned epoch = entropy_epoch();
 *		if (__predict_false(epoch != foo->epoch)) {
 *			uint8_t seed[32];
 *			if (entropy_extract(seed, sizeof seed, 0) != 0)
 *				warn("no entropy");
 *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
 *			foo->epoch = epoch;
 *		}
 */
unsigned
entropy_epoch(void)
{

	/*
	 * Unsigned int, so no need for seqlock for an atomic read, but
	 * make sure we read it afresh each time.
	 */
	return atomic_load_relaxed(&E->epoch);
}

/*
 * entropy_ready()
 *
 *	True if the entropy pool has full entropy.
 */
bool
entropy_ready(void)
{

	return atomic_load_relaxed(&E->bitsneeded) == 0;
}

/*
 * entropy_account_cpu(ec)
 *
 *	Consider whether to consolidate entropy into the global pool
 *	after we just added some into the current CPU's pending pool.
 *
 *	- If this CPU can provide enough entropy now, do so.
 *
 *	- If this and whatever else is available on other CPUs can
 *	  provide enough entropy, kick the consolidation thread.
 *
 *	- Otherwise, do as little as possible, except maybe consolidate
 *	  entropy at most once a minute.
 *
 *	Caller must be bound to a CPU and therefore have exclusive
 *	access to ec.  Will acquire and release the global lock.
 */
static void
entropy_account_cpu(struct entropy_cpu *ec)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec0;
	unsigned bitsdiff, samplesdiff;

	KASSERT(!cpu_intr_p());
	KASSERT(!cold);
	KASSERT(curlwp->l_pflag & LP_BOUND);

	/*
	 * If there's no entropy needed, and entropy has been
	 * consolidated in the last minute, do nothing.
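	 * (When kern.entropy.depletion is enabled we always proceed,
	 * since extraction can make the pool needy again at any time.)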
	 */
	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&
	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
	    __predict_true((time_uptime - E->timestamp) <= 60))
		return;

	/*
	 * Consider consolidation, under the global lock and with the
	 * per-CPU state locked.
	 */
	mutex_enter(&E->lock);
	ec0 = entropy_cpu_get(&lock);
	KASSERT(ec0 == ec);

	if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
		/* Raced with consolidation xcall.  Nothing to do.  */
	} else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
		/*
		 * If we have not yet attained full entropy but we can
		 * now, do so.  This way we disseminate entropy
		 * promptly when it becomes available early at boot;
		 * otherwise we leave it to the entropy consolidation
		 * thread, which is rate-limited to mitigate side
		 * channels and abuse.
		 */
		uint8_t buf[ENTPOOL_CAPACITY];

		/* Transfer from the local pool to the global pool.  */
		entpool_extract(ec->ec_pool, buf, sizeof buf);
		entpool_enter(&E->pool, buf, sizeof buf);
		atomic_store_relaxed(&ec->ec_bitspending, 0);
		atomic_store_relaxed(&ec->ec_samplespending, 0);
		atomic_store_relaxed(&E->bitsneeded, 0);
		atomic_store_relaxed(&E->samplesneeded, 0);

		/* Notify waiters that we now have full entropy.  */
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	} else {
		/* Determine how much we can add to the global pool.  */
		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
		    "E->bitspending=%u", E->bitspending);
		bitsdiff = MIN(ec->ec_bitspending,
		    MINENTROPYBITS - E->bitspending);
		KASSERTMSG(E->samplespending <= MINSAMPLES,
		    "E->samplespending=%u", E->samplespending);
		samplesdiff = MIN(ec->ec_samplespending,
		    MINSAMPLES - E->samplespending);

		/*
		 * This should make a difference unless we are already
		 * saturated.
		 */
		KASSERTMSG((bitsdiff || samplesdiff ||
			E->bitspending == MINENTROPYBITS ||
			E->samplespending == MINSAMPLES),
		    "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
		    " samplesdiff=%u E->samplespending=%u"
		    " ec->ec_samplespending=%u"
		    " minentropybits=%u minsamples=%u",
		    bitsdiff, E->bitspending, ec->ec_bitspending,
		    samplesdiff, E->samplespending, ec->ec_samplespending,
		    (unsigned)MINENTROPYBITS, (unsigned)MINSAMPLES);

		/* Add to the global, subtract from the local.  */
		E->bitspending += bitsdiff;
		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
		    "E->bitspending=%u", E->bitspending);
		atomic_store_relaxed(&ec->ec_bitspending,
		    ec->ec_bitspending - bitsdiff);

		E->samplespending += samplesdiff;
		KASSERTMSG(E->samplespending <= MINSAMPLES,
		    "E->samplespending=%u", E->samplespending);
		atomic_store_relaxed(&ec->ec_samplespending,
		    ec->ec_samplespending - samplesdiff);

		/* One or the other must have gone up from zero.  */
		KASSERT(E->bitspending || E->samplespending);

		if (E->bitsneeded <= E->bitspending ||
		    E->samplesneeded <= E->samplespending) {
			/*
			 * Enough bits or at least samples between all
			 * the per-CPU pools.  Leave a note for the
			 * housekeeping thread to consolidate entropy
			 * next time it wakes up -- and wake it up if
			 * this is the first time, to speed things up.
			 *
			 * If we don't need any entropy, this doesn't
			 * mean much, but it is the only time we ever
			 * gather additional entropy in case the
			 * accounting has been overly optimistic.  This
			 * happens at most once a minute, so there's
			 * negligible performance cost.
			 */
			E->consolidate = true;
			if (E->epoch == (unsigned)-1)
				cv_broadcast(&E->cv);
			if (E->bitsneeded == 0)
				entropy_discretionary_evcnt.ev_count++;
		} else {
			/* Can't get full entropy.  Keep gathering.  */
			entropy_partial_evcnt.ev_count++;
		}
	}

	entropy_cpu_put(&lock, ec);
	mutex_exit(&E->lock);
}

/*
 * entropy_enter_early(buf, len, nbits)
 *
 *	Do entropy bookkeeping globally, before we have established
 *	per-CPU pools.  Enter directly into the global pool in the hope
 *	that we enter enough before the first entropy_extract to thwart
 *	iterative-guessing attacks; entropy_extract will warn if not.
 */
static void
entropy_enter_early(const void *buf, size_t len, unsigned nbits)
{
	bool notify = false;
	int s;

	KASSERT(cold);

	/*
	 * We're early at boot before multithreading and multi-CPU
	 * operation, and we don't have softints yet to defer
	 * processing from interrupt context, so we have to enter the
	 * samples directly into the global pool.  But interrupts may
	 * be enabled, and we enter this path from interrupt context,
	 * so block interrupts until we're done.
	 */
	s = splhigh();

	/* Enter it into the pool.  */
	entpool_enter(&E->pool, buf, len);

	/*
	 * Decide whether to notify reseed -- we will do so if either:
	 * (a) we transition from partial entropy to full entropy, or
	 * (b) we get a batch of full entropy all at once.
	 * We don't count timing samples because we assume, while cold,
	 * there's not likely to be much jitter yet.
	 */
	notify |= (E->bitsneeded && E->bitsneeded <= nbits);
	notify |= (nbits >= MINENTROPYBITS);

	/*
	 * Subtract from the needed count and notify if appropriate.
	 * We don't count samples here because entropy_timer might
	 * still be returning zero at this point if there's no CPU
	 * cycle counter.
	 */
	E->bitsneeded -= MIN(E->bitsneeded, nbits);
	if (notify) {
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	}

	splx(s);
}

/*
 * entropy_enter(buf, len, nbits, count)
 *
 *	Enter len bytes of data from buf into the system's entropy
 *	pool, stirring as necessary when the internal buffer fills up.
 *	nbits is a lower bound on the number of bits of entropy in the
 *	process that led to this sample.
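 *
 *	If count is true, the sample is also counted toward the sample
 *	threshold (MINSAMPLES), an alternative criterion for reaching
 *	full entropy.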
 */
static void
entropy_enter(const void *buf, size_t len, unsigned nbits, bool count)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
	unsigned bitspending, samplespending;
	int bound;

	KASSERTMSG(!cpu_intr_p(),
	    "use entropy_enter_intr from interrupt context");
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/*
	 * If we're still cold, just use entropy_enter_early to put
	 * samples directly into the global pool.
	 */
	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return;
	}

	/*
	 * Bind ourselves to the current CPU so we don't switch CPUs
	 * between entering data into the current CPU's pool (and
	 * updating the pending count) and transferring it to the
	 * global pool in entropy_account_cpu.
	 */
	bound = curlwp_bind();

	/*
	 * With the per-CPU state locked, enter into the per-CPU pool
	 * and count up what we can add.
	 *
	 * We don't count samples while cold because entropy_timer
	 * might still be returning zero if there's no CPU cycle
	 * counter.
	 */
	ec = entropy_cpu_get(&lock);
	entpool_enter(ec->ec_pool, buf, len);
	bitspending = ec->ec_bitspending;
	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
	samplespending = ec->ec_samplespending;
	if (__predict_true(count)) {
		samplespending += MIN(MINSAMPLES - samplespending, 1);
		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
	}
	entropy_cpu_put(&lock, ec);

	/* Consolidate globally if appropriate based on what we added.  */
	if (bitspending > 0 || samplespending >= MINSAMPLES)
		entropy_account_cpu(ec);

	curlwp_bindx(bound);
}

/*
 * entropy_enter_intr(buf, len, nbits, count)
 *
 *	Enter up to len bytes of data from buf into the system's
 *	entropy pool without stirring.  nbits is a lower bound on the
 *	number of bits of entropy in the process that led to this
 *	sample.  If the sample could be entered completely, assume
 *	nbits of entropy pending; otherwise assume none, since we don't
 *	know whether some parts of the sample are constant, for
 *	instance.  Schedule a softint to stir the entropy pool if
 *	needed.  Return true if used fully, false if truncated at all.
 *
 *	Using this in thread or softint context with no spin locks held
 *	will work, but you might as well use entropy_enter in that
 *	case.
 */
static bool
entropy_enter_intr(const void *buf, size_t len, unsigned nbits, bool count)
{
	struct entropy_cpu *ec;
	bool fullyused = false;
	uint32_t bitspending, samplespending;
	int s;

	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/*
	 * If we're still cold, just use entropy_enter_early to put
	 * samples directly into the global pool.
	 */
	if (__predict_false(cold)) {
		entropy_enter_early(buf, len, nbits);
		return true;
	}

	/*
	 * In case we were called in thread or interrupt context with
	 * interrupts unblocked, block soft interrupts up to
	 * IPL_SOFTSERIAL.  This way logic that is safe in interrupt
	 * context or under a spin lock is also safe in less
	 * restrictive contexts.
	 */
	s = splsoftserial();

	/*
	 * Acquire the per-CPU state.  If someone is in the middle of
	 * using it, drop the sample.  Otherwise, take the lock so that
	 * higher-priority interrupts will drop their samples.
	 */
	ec = percpu_getref(entropy_percpu);
	if (ec->ec_locked) {
		ec->ec_evcnt->intrdrop.ev_count++;
		goto out0;
	}
	ec->ec_locked = true;
	__insn_barrier();

	/*
	 * Enter as much as we can into the per-CPU pool.  If it was
	 * truncated, schedule a softint to stir the pool and stop.
	 */
	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
		if (__predict_true(!cold))
			softint_schedule(entropy_sih);
		ec->ec_evcnt->intrtrunc.ev_count++;
		goto out1;
	}
	fullyused = true;

	/*
	 * Count up what we can contribute.
	 *
	 * We don't count samples while cold because entropy_timer
	 * might still be returning zero if there's no CPU cycle
	 * counter.
	 */
	bitspending = ec->ec_bitspending;
	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
	if (__predict_true(count)) {
		samplespending = ec->ec_samplespending;
		samplespending += MIN(MINSAMPLES - samplespending, 1);
		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
	}

	/* Schedule a softint if we added anything and it matters.  */
	if (__predict_false(atomic_load_relaxed(&E->bitsneeded) ||
		atomic_load_relaxed(&entropy_depletion)) &&
	    (nbits != 0 || count) &&
	    __predict_true(!cold))
		softint_schedule(entropy_sih);

out1:	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
out0:	percpu_putref(entropy_percpu);
	splx(s);

	return fullyused;
}

/*
 * entropy_softintr(cookie)
 *
 *	Soft interrupt handler for entering entropy.  Takes care of
 *	stirring the local CPU's entropy pool if it filled up during
 *	hard interrupts, and promptly crediting entropy from the local
 *	CPU's entropy pool to the global entropy pool if needed.
 */
static void
entropy_softintr(void *cookie)
{
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;
	unsigned bitspending, samplespending;

	/*
	 * With the per-CPU state locked, stir the pool if necessary
	 * and determine if there's any pending entropy on this CPU to
	 * account globally.
	 */
	ec = entropy_cpu_get(&lock);
	ec->ec_evcnt->softint.ev_count++;
	entpool_stir(ec->ec_pool);
	bitspending = ec->ec_bitspending;
	samplespending = ec->ec_samplespending;
	entropy_cpu_put(&lock, ec);

	/* Consolidate globally if appropriate based on what we added.  */
	if (bitspending > 0 || samplespending >= MINSAMPLES)
		entropy_account_cpu(ec);
}

/*
 * entropy_thread(cookie)
 *
 *	Handle any asynchronous entropy housekeeping.
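 *	Currently that means consolidating entropy from the per-CPU
 *	pools into the global pool when enough is pending or when
 *	consolidation has been explicitly requested.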
11125084c1b5Sriastradh */ 11135084c1b5Sriastradh static void 11145084c1b5Sriastradh entropy_thread(void *cookie) 11155084c1b5Sriastradh { 11163d318168Sriastradh bool consolidate; 11175084c1b5Sriastradh 111872c927ccSriastradh #ifndef _RUMPKERNEL /* XXX rump starts threads before cold */ 11193586ae1dSriastradh KASSERT(!cold); 112072c927ccSriastradh #endif 11213586ae1dSriastradh 11225084c1b5Sriastradh for (;;) { 11235084c1b5Sriastradh /* 11243d318168Sriastradh * Wait until there's full entropy somewhere among the 11253d318168Sriastradh * CPUs, as confirmed at most once per minute, or 11263d318168Sriastradh * someone wants to consolidate. 11275084c1b5Sriastradh */ 112896b2c7deSriastradh if (entropy_pending()) { 11293d318168Sriastradh consolidate = true; 11303d318168Sriastradh } else { 11315084c1b5Sriastradh mutex_enter(&E->lock); 11323d318168Sriastradh if (!E->consolidate) 11335084c1b5Sriastradh cv_timedwait(&E->cv, &E->lock, 60*hz); 11343d318168Sriastradh consolidate = E->consolidate; 11353d318168Sriastradh E->consolidate = false; 11365084c1b5Sriastradh mutex_exit(&E->lock); 11373d318168Sriastradh } 11385084c1b5Sriastradh 11393d318168Sriastradh if (consolidate) { 11405084c1b5Sriastradh /* Do it. */ 1141bbed1747Sriastradh entropy_do_consolidate(); 11425084c1b5Sriastradh 11435084c1b5Sriastradh /* Mitigate abuse. */ 11445084c1b5Sriastradh kpause("entropy", false, hz, NULL); 11455084c1b5Sriastradh } 11465084c1b5Sriastradh } 11473d318168Sriastradh } 11485084c1b5Sriastradh 114996b2c7deSriastradh struct entropy_pending_count { 115096b2c7deSriastradh uint32_t bitspending; 115196b2c7deSriastradh uint32_t samplespending; 115296b2c7deSriastradh }; 115396b2c7deSriastradh 11545084c1b5Sriastradh /* 11555084c1b5Sriastradh * entropy_pending() 11565084c1b5Sriastradh * 115796b2c7deSriastradh * True if enough bits or samples are pending on other CPUs to 115896b2c7deSriastradh * warrant consolidation. 11595084c1b5Sriastradh */ 116096b2c7deSriastradh static bool 11615084c1b5Sriastradh entropy_pending(void) 11625084c1b5Sriastradh { 116396b2c7deSriastradh struct entropy_pending_count count = { 0, 0 }, *C = &count; 11645084c1b5Sriastradh 116596b2c7deSriastradh percpu_foreach(entropy_percpu, &entropy_pending_cpu, C); 116696b2c7deSriastradh return C->bitspending >= MINENTROPYBITS || 116796b2c7deSriastradh C->samplespending >= MINSAMPLES; 11685084c1b5Sriastradh } 11695084c1b5Sriastradh 11705084c1b5Sriastradh static void 11715084c1b5Sriastradh entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci) 11725084c1b5Sriastradh { 11735084c1b5Sriastradh struct entropy_cpu *ec = ptr; 117496b2c7deSriastradh struct entropy_pending_count *C = cookie; 117596b2c7deSriastradh uint32_t cpu_bitspending; 117696b2c7deSriastradh uint32_t cpu_samplespending; 11775084c1b5Sriastradh 117896b2c7deSriastradh cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending); 117996b2c7deSriastradh cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending); 118096b2c7deSriastradh C->bitspending += MIN(MINENTROPYBITS - C->bitspending, 118196b2c7deSriastradh cpu_bitspending); 118296b2c7deSriastradh C->samplespending += MIN(MINSAMPLES - C->samplespending, 118396b2c7deSriastradh cpu_samplespending); 11845084c1b5Sriastradh } 11855084c1b5Sriastradh 11865084c1b5Sriastradh /* 1187bbed1747Sriastradh * entropy_do_consolidate() 11885084c1b5Sriastradh * 11895084c1b5Sriastradh * Issue a cross-call to gather entropy on all CPUs and advance 11905084c1b5Sriastradh * the entropy epoch. 
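 *
 * The gathering happens into a pool on this function's stack while
 * E->lock is not held: entropy_consolidate_xc briefly takes E->lock
 * on each CPU, so holding it across the cross-call would deadlock.
 * Only once every CPU has contributed is the temporary pool mixed
 * into E->pool under E->lock.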
11915084c1b5Sriastradh */ 11925084c1b5Sriastradh static void 1193bbed1747Sriastradh entropy_do_consolidate(void) 11945084c1b5Sriastradh { 11955084c1b5Sriastradh static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 11965084c1b5Sriastradh static struct timeval lasttime; /* serialized by E->lock */ 1197d5f6e51dSriastradh struct entpool pool; 1198d5f6e51dSriastradh uint8_t buf[ENTPOOL_CAPACITY]; 119996b2c7deSriastradh unsigned bitsdiff, samplesdiff; 12005084c1b5Sriastradh uint64_t ticket; 12015084c1b5Sriastradh 12023586ae1dSriastradh KASSERT(!cold); 12033586ae1dSriastradh ASSERT_SLEEPABLE(); 12043586ae1dSriastradh 1205d5f6e51dSriastradh /* Gather entropy on all CPUs into a temporary pool. */ 1206d5f6e51dSriastradh memset(&pool, 0, sizeof pool); 1207d5f6e51dSriastradh ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL); 12085084c1b5Sriastradh xc_wait(ticket); 12095084c1b5Sriastradh 12105084c1b5Sriastradh /* Acquire the lock to notify waiters. */ 12115084c1b5Sriastradh mutex_enter(&E->lock); 12125084c1b5Sriastradh 12135084c1b5Sriastradh /* Count another consolidation. */ 12145084c1b5Sriastradh entropy_consolidate_evcnt.ev_count++; 12155084c1b5Sriastradh 12165084c1b5Sriastradh /* Note when we last consolidated, i.e. now. */ 12175084c1b5Sriastradh E->timestamp = time_uptime; 12185084c1b5Sriastradh 1219d5f6e51dSriastradh /* Mix what we gathered into the global pool. */ 1220d5f6e51dSriastradh entpool_extract(&pool, buf, sizeof buf); 1221d5f6e51dSriastradh entpool_enter(&E->pool, buf, sizeof buf); 1222d5f6e51dSriastradh explicit_memset(&pool, 0, sizeof pool); 1223d5f6e51dSriastradh 12245084c1b5Sriastradh /* Count the entropy that was gathered. */ 122596b2c7deSriastradh bitsdiff = MIN(E->bitsneeded, E->bitspending); 122696b2c7deSriastradh atomic_store_relaxed(&E->bitsneeded, E->bitsneeded - bitsdiff); 122796b2c7deSriastradh E->bitspending -= bitsdiff; 122896b2c7deSriastradh if (__predict_false(E->bitsneeded > 0) && bitsdiff != 0) { 1229dd68197bSriastradh if ((boothowto & AB_DEBUG) != 0 && 1230dd68197bSriastradh ratecheck(&lasttime, &interval)) { 1231dd68197bSriastradh printf("WARNING:" 12325084c1b5Sriastradh " consolidating less than full entropy\n"); 12335084c1b5Sriastradh } 12347271b779Sjmcneill } 12355084c1b5Sriastradh 123696b2c7deSriastradh samplesdiff = MIN(E->samplesneeded, E->samplespending); 123796b2c7deSriastradh atomic_store_relaxed(&E->samplesneeded, 123896b2c7deSriastradh E->samplesneeded - samplesdiff); 123996b2c7deSriastradh E->samplespending -= samplesdiff; 124096b2c7deSriastradh 12415084c1b5Sriastradh /* Advance the epoch and notify waiters. */ 12425084c1b5Sriastradh entropy_notify(); 12435084c1b5Sriastradh 12445084c1b5Sriastradh /* Release the lock. */ 12455084c1b5Sriastradh mutex_exit(&E->lock); 12465084c1b5Sriastradh } 12475084c1b5Sriastradh 12485084c1b5Sriastradh /* 12492bd92f80Sriastradh * entropy_consolidate_xc(vpool, arg2) 12505084c1b5Sriastradh * 12515084c1b5Sriastradh * Extract output from the local CPU's input pool and enter it 12522bd92f80Sriastradh * into a temporary pool passed as vpool. 
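 *
 * The per-CPU pending counters are zeroed here because whatever they
 * counted now lives in the temporary pool, so it must not be
 * credited a second time by the housekeeping thread.  The
 * entropy_timer() readings taken between steps are mixed in as free
 * timing jitter, with no entropy credited for them.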
12535084c1b5Sriastradh */ 12545084c1b5Sriastradh static void 1255d5f6e51dSriastradh entropy_consolidate_xc(void *vpool, void *arg2 __unused) 12565084c1b5Sriastradh { 1257d5f6e51dSriastradh struct entpool *pool = vpool; 1258450311ecSriastradh struct entropy_cpu_lock lock; 12595084c1b5Sriastradh struct entropy_cpu *ec; 12605084c1b5Sriastradh uint8_t buf[ENTPOOL_CAPACITY]; 12615084c1b5Sriastradh uint32_t extra[7]; 12625084c1b5Sriastradh unsigned i = 0; 12635084c1b5Sriastradh 12645084c1b5Sriastradh /* Grab CPU number and cycle counter to mix extra into the pool. */ 12655084c1b5Sriastradh extra[i++] = cpu_number(); 12665084c1b5Sriastradh extra[i++] = entropy_timer(); 12675084c1b5Sriastradh 12685084c1b5Sriastradh /* 1269450311ecSriastradh * With the per-CPU state locked, extract from the per-CPU pool 1270450311ecSriastradh * and count it as no longer pending. 12715084c1b5Sriastradh */ 1272450311ecSriastradh ec = entropy_cpu_get(&lock); 12735084c1b5Sriastradh extra[i++] = entropy_timer(); 12745084c1b5Sriastradh entpool_extract(ec->ec_pool, buf, sizeof buf); 127596b2c7deSriastradh atomic_store_relaxed(&ec->ec_bitspending, 0); 127696b2c7deSriastradh atomic_store_relaxed(&ec->ec_samplespending, 0); 12775084c1b5Sriastradh extra[i++] = entropy_timer(); 1278450311ecSriastradh entropy_cpu_put(&lock, ec); 12795084c1b5Sriastradh extra[i++] = entropy_timer(); 12805084c1b5Sriastradh 12815084c1b5Sriastradh /* 12825084c1b5Sriastradh * Copy over statistics, and enter the per-CPU extract and the 1283d5f6e51dSriastradh * extra timing into the temporary pool, under the global lock. 12845084c1b5Sriastradh */ 12855084c1b5Sriastradh mutex_enter(&E->lock); 12865084c1b5Sriastradh extra[i++] = entropy_timer(); 1287d5f6e51dSriastradh entpool_enter(pool, buf, sizeof buf); 12885084c1b5Sriastradh explicit_memset(buf, 0, sizeof buf); 12895084c1b5Sriastradh extra[i++] = entropy_timer(); 12905084c1b5Sriastradh KASSERT(i == __arraycount(extra)); 1291d5f6e51dSriastradh entpool_enter(pool, extra, sizeof extra); 12925084c1b5Sriastradh explicit_memset(extra, 0, sizeof extra); 12935084c1b5Sriastradh mutex_exit(&E->lock); 12945084c1b5Sriastradh } 12955084c1b5Sriastradh 12965084c1b5Sriastradh /* 12975084c1b5Sriastradh * entropy_notify() 12985084c1b5Sriastradh * 12995084c1b5Sriastradh * Caller just contributed entropy to the global pool. Advance 13005084c1b5Sriastradh * the entropy epoch and notify waiters. 13015084c1b5Sriastradh * 130296b2c7deSriastradh * Caller must hold the global entropy lock. 13035084c1b5Sriastradh */ 13045084c1b5Sriastradh static void 13055084c1b5Sriastradh entropy_notify(void) 13065084c1b5Sriastradh { 1307caee4314Sriastradh static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1308caee4314Sriastradh static struct timeval lasttime; /* serialized by E->lock */ 130996b2c7deSriastradh static bool ready = false, besteffort = false; 13105084c1b5Sriastradh unsigned epoch; 13115084c1b5Sriastradh 13123586ae1dSriastradh KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 13135084c1b5Sriastradh 13145084c1b5Sriastradh /* 13155084c1b5Sriastradh * If this is the first time, print a message to the console 13165084c1b5Sriastradh * that we're ready so operators can compare it to the timing 13175084c1b5Sriastradh * of other events. 131896b2c7deSriastradh * 131996b2c7deSriastradh * If we didn't get full entropy from reliable sources, report 132096b2c7deSriastradh * instead that we are running on fumes with best effort. 
(If 132196b2c7deSriastradh * we ever do get full entropy after that, print the ready 132296b2c7deSriastradh * message once.) 13235084c1b5Sriastradh */ 132496b2c7deSriastradh if (__predict_false(!ready)) { 132596b2c7deSriastradh if (E->bitsneeded == 0) { 13265084c1b5Sriastradh printf("entropy: ready\n"); 132796b2c7deSriastradh ready = true; 132896b2c7deSriastradh } else if (E->samplesneeded == 0 && !besteffort) { 132996b2c7deSriastradh printf("entropy: best effort\n"); 133096b2c7deSriastradh besteffort = true; 133196b2c7deSriastradh } 133296b2c7deSriastradh } 13335084c1b5Sriastradh 13345084c1b5Sriastradh /* Set the epoch; roll over from UINTMAX-1 to 1. */ 1335caee4314Sriastradh if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) || 1336caee4314Sriastradh ratecheck(&lasttime, &interval)) { 13375084c1b5Sriastradh epoch = E->epoch + 1; 13385084c1b5Sriastradh if (epoch == 0 || epoch == (unsigned)-1) 13395084c1b5Sriastradh epoch = 1; 13405084c1b5Sriastradh atomic_store_relaxed(&E->epoch, epoch); 1341caee4314Sriastradh } 134266528ec8Sriastradh KASSERT(E->epoch != (unsigned)-1); 13435084c1b5Sriastradh 13445084c1b5Sriastradh /* Notify waiters. */ 13453586ae1dSriastradh if (__predict_true(!cold)) { 13465084c1b5Sriastradh cv_broadcast(&E->cv); 13475084c1b5Sriastradh selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT); 13485084c1b5Sriastradh } 13495084c1b5Sriastradh 13505084c1b5Sriastradh /* Count another notification. */ 13515084c1b5Sriastradh entropy_notify_evcnt.ev_count++; 13525084c1b5Sriastradh } 13535084c1b5Sriastradh 13545084c1b5Sriastradh /* 1355bbed1747Sriastradh * entropy_consolidate() 13565084c1b5Sriastradh * 13571650e193Sriastradh * Trigger entropy consolidation and wait for it to complete, or 13581650e193Sriastradh * return EINTR if interrupted by a signal. 1359bbed1747Sriastradh * 1360bbed1747Sriastradh * This should be used sparingly, not periodically -- requiring 1361bbed1747Sriastradh * conscious intervention by the operator or a clear policy 1362bbed1747Sriastradh * decision. Otherwise, the kernel will automatically consolidate 1363bbed1747Sriastradh * when enough entropy has been gathered into per-CPU pools to 1364bbed1747Sriastradh * transition to full entropy. 13655084c1b5Sriastradh */ 13661650e193Sriastradh int 1367*4dd1804bSriastradh entropy_consolidate(void) 13685084c1b5Sriastradh { 13695084c1b5Sriastradh uint64_t ticket; 13705084c1b5Sriastradh int error; 13715084c1b5Sriastradh 13723586ae1dSriastradh KASSERT(!cold); 13733586ae1dSriastradh ASSERT_SLEEPABLE(); 13745084c1b5Sriastradh 13755084c1b5Sriastradh mutex_enter(&E->lock); 13765084c1b5Sriastradh ticket = entropy_consolidate_evcnt.ev_count; 13775084c1b5Sriastradh E->consolidate = true; 13785084c1b5Sriastradh cv_broadcast(&E->cv); 13795084c1b5Sriastradh while (ticket == entropy_consolidate_evcnt.ev_count) { 13805084c1b5Sriastradh error = cv_wait_sig(&E->cv, &E->lock); 13815084c1b5Sriastradh if (error) 13825084c1b5Sriastradh break; 13835084c1b5Sriastradh } 13845084c1b5Sriastradh mutex_exit(&E->lock); 13851650e193Sriastradh 13861650e193Sriastradh return error; 13875084c1b5Sriastradh } 13885084c1b5Sriastradh 1389bbed1747Sriastradh /* 1390bbed1747Sriastradh * sysctl -w kern.entropy.consolidate=1 1391bbed1747Sriastradh * 1392bbed1747Sriastradh * Trigger entropy consolidation and wait for it to complete. 1393bbed1747Sriastradh * Writable only by superuser. 
This, writing to /dev/random, and 1394bbed1747Sriastradh * ioctl(RNDADDDATA) are the only ways for the system to 1395bbed1747Sriastradh * consolidate entropy if the operator knows something the kernel 1396bbed1747Sriastradh * doesn't about how unpredictable the pending entropy pools are. 1397bbed1747Sriastradh */ 1398bbed1747Sriastradh static int 1399bbed1747Sriastradh sysctl_entropy_consolidate(SYSCTLFN_ARGS) 1400bbed1747Sriastradh { 1401bbed1747Sriastradh struct sysctlnode node = *rnode; 140221ea4580Sriastradh int arg = 0; 1403bbed1747Sriastradh int error; 1404bbed1747Sriastradh 1405bbed1747Sriastradh node.sysctl_data = &arg; 1406bbed1747Sriastradh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1407bbed1747Sriastradh if (error || newp == NULL) 1408bbed1747Sriastradh return error; 1409bbed1747Sriastradh if (arg) 1410*4dd1804bSriastradh error = entropy_consolidate(); 1411bbed1747Sriastradh 14125084c1b5Sriastradh return error; 14135084c1b5Sriastradh } 14145084c1b5Sriastradh 14155084c1b5Sriastradh /* 141645b27d01Sriastradh * entropy_gather() 141745b27d01Sriastradh * 141845b27d01Sriastradh * Trigger gathering entropy from all on-demand sources, and, if 141945b27d01Sriastradh * requested, wait for synchronous sources (but not asynchronous 142045b27d01Sriastradh * sources) to complete, or fail with EINTR if interrupted by a 142145b27d01Sriastradh * signal. 142245b27d01Sriastradh */ 142345b27d01Sriastradh int 142445b27d01Sriastradh entropy_gather(void) 142545b27d01Sriastradh { 142645b27d01Sriastradh int error; 142745b27d01Sriastradh 142845b27d01Sriastradh mutex_enter(&E->lock); 142945b27d01Sriastradh error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT|ENTROPY_SIG); 143045b27d01Sriastradh mutex_exit(&E->lock); 143145b27d01Sriastradh 143245b27d01Sriastradh return error; 143345b27d01Sriastradh } 143445b27d01Sriastradh 143545b27d01Sriastradh /* 1436ec335737Sriastradh * sysctl -w kern.entropy.gather=1 1437ec335737Sriastradh * 1438ec335737Sriastradh * Trigger gathering entropy from all on-demand sources, and wait 1439ec335737Sriastradh * for synchronous sources (but not asynchronous sources) to 1440ec335737Sriastradh * complete. Writable only by superuser. 1441ec335737Sriastradh */ 1442ec335737Sriastradh static int 1443ec335737Sriastradh sysctl_entropy_gather(SYSCTLFN_ARGS) 1444ec335737Sriastradh { 1445ec335737Sriastradh struct sysctlnode node = *rnode; 144621ea4580Sriastradh int arg = 0; 1447ec335737Sriastradh int error; 1448ec335737Sriastradh 1449ec335737Sriastradh node.sysctl_data = &arg; 1450ec335737Sriastradh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1451ec335737Sriastradh if (error || newp == NULL) 1452ec335737Sriastradh return error; 145345b27d01Sriastradh if (arg) 145445b27d01Sriastradh error = entropy_gather(); 1455ec335737Sriastradh 1456a9f38d11Sriastradh return error; 1457ec335737Sriastradh } 1458ec335737Sriastradh 1459ec335737Sriastradh /* 14605084c1b5Sriastradh * entropy_extract(buf, len, flags) 14615084c1b5Sriastradh * 14625084c1b5Sriastradh * Extract len bytes from the global entropy pool into buf. 14635084c1b5Sriastradh * 146459f579f5Sriastradh * Caller MUST NOT expose these bytes directly -- must use them 146559f579f5Sriastradh * ONLY to seed a cryptographic pseudorandom number generator 146659f579f5Sriastradh * (`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'), 146759f579f5Sriastradh * and then erase them. 
entropy_extract does not, on its own, 146859f579f5Sriastradh * provide backtracking resistance -- it must be combined with a 146959f579f5Sriastradh * PRNG/DRBG that does. 147059f579f5Sriastradh * 14713586ae1dSriastradh * This may be used very early at boot, before even entropy_init 14723586ae1dSriastradh * has been called. 14733586ae1dSriastradh * 147459f579f5Sriastradh * You generally shouldn't use this directly -- use cprng(9) 147559f579f5Sriastradh * instead. 147659f579f5Sriastradh * 14775084c1b5Sriastradh * Flags may have: 14785084c1b5Sriastradh * 14795084c1b5Sriastradh * ENTROPY_WAIT Wait for entropy if not available yet. 14805084c1b5Sriastradh * ENTROPY_SIG Allow interruption by a signal during wait. 1481bdad8b27Sriastradh * ENTROPY_HARDFAIL Either fill the buffer with full entropy, 1482bdad8b27Sriastradh * or fail without filling it at all. 14835084c1b5Sriastradh * 14845084c1b5Sriastradh * Return zero on success, or error on failure: 14855084c1b5Sriastradh * 14865084c1b5Sriastradh * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 14875084c1b5Sriastradh * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 14885084c1b5Sriastradh * 14895084c1b5Sriastradh * If ENTROPY_WAIT is set, allowed only in thread context. If 1490763d441dSriastradh * ENTROPY_WAIT is not set, allowed also in softint context -- may 1491763d441dSriastradh * sleep on an adaptive lock up to IPL_SOFTSERIAL. Forbidden in 1492763d441dSriastradh * hard interrupt context. 14935084c1b5Sriastradh */ 14945084c1b5Sriastradh int 14955084c1b5Sriastradh entropy_extract(void *buf, size_t len, int flags) 14965084c1b5Sriastradh { 14975084c1b5Sriastradh static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 14985084c1b5Sriastradh static struct timeval lasttime; /* serialized by E->lock */ 149996b2c7deSriastradh bool printed = false; 15003586ae1dSriastradh int s = -1/*XXXGCC*/, error; 15015084c1b5Sriastradh 15025084c1b5Sriastradh if (ISSET(flags, ENTROPY_WAIT)) { 15035084c1b5Sriastradh ASSERT_SLEEPABLE(); 15043586ae1dSriastradh KASSERT(!cold); 15055084c1b5Sriastradh } 15065084c1b5Sriastradh 1507a820d532Sriastradh /* Refuse to operate in interrupt context. */ 1508a820d532Sriastradh KASSERT(!cpu_intr_p()); 1509a820d532Sriastradh 15103586ae1dSriastradh /* 15113586ae1dSriastradh * If we're cold, we are only contending with interrupts on the 15123586ae1dSriastradh * current CPU, so block them. Otherwise, we are _not_ 15133586ae1dSriastradh * contending with interrupts on the current CPU, but we are 15143586ae1dSriastradh * contending with other threads, to exclude them with a mutex. 15153586ae1dSriastradh */ 15163586ae1dSriastradh if (__predict_false(cold)) 15173586ae1dSriastradh s = splhigh(); 15183586ae1dSriastradh else 15195084c1b5Sriastradh mutex_enter(&E->lock); 15205084c1b5Sriastradh 15215084c1b5Sriastradh /* Wait until there is enough entropy in the system. */ 15225084c1b5Sriastradh error = 0; 152396b2c7deSriastradh if (E->bitsneeded > 0 && E->samplesneeded == 0) { 152496b2c7deSriastradh /* 152596b2c7deSriastradh * We don't have full entropy from reliable sources, 152696b2c7deSriastradh * but we gathered a plausible number of samples from 152796b2c7deSriastradh * other sources such as timers. Try asking for more 152896b2c7deSriastradh * from any sources we can, but don't worry if it 152996b2c7deSriastradh * fails -- best effort. 
153096b2c7deSriastradh */ 153196b2c7deSriastradh (void)entropy_request(ENTROPY_CAPACITY, flags); 153296b2c7deSriastradh } else while (E->bitsneeded > 0 && E->samplesneeded > 0) { 15335084c1b5Sriastradh /* Ask for more, synchronously if possible. */ 153489444d3fSriastradh error = entropy_request(len, flags); 153589444d3fSriastradh if (error) 153689444d3fSriastradh break; 15375084c1b5Sriastradh 15385084c1b5Sriastradh /* If we got enough, we're done. */ 153996b2c7deSriastradh if (E->bitsneeded == 0 || E->samplesneeded == 0) { 15405084c1b5Sriastradh KASSERT(error == 0); 15415084c1b5Sriastradh break; 15425084c1b5Sriastradh } 15435084c1b5Sriastradh 15445084c1b5Sriastradh /* If not waiting, stop here. */ 15455084c1b5Sriastradh if (!ISSET(flags, ENTROPY_WAIT)) { 15465084c1b5Sriastradh error = EWOULDBLOCK; 15475084c1b5Sriastradh break; 15485084c1b5Sriastradh } 15495084c1b5Sriastradh 15505084c1b5Sriastradh /* Wait for some entropy to come in and try again. */ 15513586ae1dSriastradh KASSERT(!cold); 155296b2c7deSriastradh if (!printed) { 155396b2c7deSriastradh printf("entropy: pid %d (%s) waiting for entropy(7)\n", 155430dac487Sgson curproc->p_pid, curproc->p_comm); 155596b2c7deSriastradh printed = true; 155696b2c7deSriastradh } 155730dac487Sgson 15585084c1b5Sriastradh if (ISSET(flags, ENTROPY_SIG)) { 155996b2c7deSriastradh error = cv_timedwait_sig(&E->cv, &E->lock, hz); 156096b2c7deSriastradh if (error && error != EWOULDBLOCK) 15615084c1b5Sriastradh break; 15625084c1b5Sriastradh } else { 156396b2c7deSriastradh cv_timedwait(&E->cv, &E->lock, hz); 15645084c1b5Sriastradh } 15655084c1b5Sriastradh } 15665084c1b5Sriastradh 1567bdad8b27Sriastradh /* 1568bdad8b27Sriastradh * Count failure -- but fill the buffer nevertheless, unless 1569bdad8b27Sriastradh * the caller specified ENTROPY_HARDFAIL. 1570bdad8b27Sriastradh */ 1571bdad8b27Sriastradh if (error) { 1572bdad8b27Sriastradh if (ISSET(flags, ENTROPY_HARDFAIL)) 1573bdad8b27Sriastradh goto out; 15745084c1b5Sriastradh entropy_extract_fail_evcnt.ev_count++; 1575bdad8b27Sriastradh } 15765084c1b5Sriastradh 15775084c1b5Sriastradh /* 157896b2c7deSriastradh * Report a warning if we haven't yet reached full entropy. 15795084c1b5Sriastradh * This is the only case where we consider entropy to be 15805084c1b5Sriastradh * `depleted' without kern.entropy.depletion enabled -- when we 15815084c1b5Sriastradh * only have partial entropy, an adversary may be able to 15825084c1b5Sriastradh * narrow the state of the pool down to a small number of 15835084c1b5Sriastradh * possibilities; the output then enables them to confirm a 15845084c1b5Sriastradh * guess, reducing its entropy from the adversary's perspective 15855084c1b5Sriastradh * to zero. 158696b2c7deSriastradh * 158796b2c7deSriastradh * This should only happen if the operator has chosen to 158896b2c7deSriastradh * consolidate, either through sysctl kern.entropy.consolidate 158996b2c7deSriastradh * or by writing less than full entropy to /dev/random as root 159096b2c7deSriastradh * (which /dev/random promises will immediately affect 159196b2c7deSriastradh * subsequent output, for better or worse). 
15925084c1b5Sriastradh */ 159396b2c7deSriastradh if (E->bitsneeded > 0 && E->samplesneeded > 0) { 159496b2c7deSriastradh if (__predict_false(E->epoch == (unsigned)-1) && 159596b2c7deSriastradh ratecheck(&lasttime, &interval)) { 1596dd68197bSriastradh printf("WARNING:" 1597dd68197bSriastradh " system needs entropy for security;" 1598dd68197bSriastradh " see entropy(7)\n"); 159996b2c7deSriastradh } 160096b2c7deSriastradh atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS); 160196b2c7deSriastradh atomic_store_relaxed(&E->samplesneeded, MINSAMPLES); 16025084c1b5Sriastradh } 16035084c1b5Sriastradh 16045084c1b5Sriastradh /* Extract data from the pool, and `deplete' if we're doing that. */ 16055084c1b5Sriastradh entpool_extract(&E->pool, buf, len); 16065084c1b5Sriastradh if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 16075084c1b5Sriastradh error == 0) { 16085084c1b5Sriastradh unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 160996b2c7deSriastradh unsigned bitsneeded = E->bitsneeded; 161096b2c7deSriastradh unsigned samplesneeded = E->samplesneeded; 16115084c1b5Sriastradh 161296b2c7deSriastradh bitsneeded += MIN(MINENTROPYBITS - bitsneeded, cost); 161396b2c7deSriastradh samplesneeded += MIN(MINSAMPLES - samplesneeded, cost); 161496b2c7deSriastradh 161596b2c7deSriastradh atomic_store_relaxed(&E->bitsneeded, bitsneeded); 161696b2c7deSriastradh atomic_store_relaxed(&E->samplesneeded, samplesneeded); 16175084c1b5Sriastradh entropy_deplete_evcnt.ev_count++; 16185084c1b5Sriastradh } 16195084c1b5Sriastradh 1620bdad8b27Sriastradh out: /* Release the global lock and return the error. */ 16213586ae1dSriastradh if (__predict_false(cold)) 16223586ae1dSriastradh splx(s); 16233586ae1dSriastradh else 16245084c1b5Sriastradh mutex_exit(&E->lock); 16255084c1b5Sriastradh return error; 16265084c1b5Sriastradh } 16275084c1b5Sriastradh 16285084c1b5Sriastradh /* 16295084c1b5Sriastradh * entropy_poll(events) 16305084c1b5Sriastradh * 16315084c1b5Sriastradh * Return the subset of events ready, and if it is not all of 16325084c1b5Sriastradh * events, record curlwp as waiting for entropy. 16335084c1b5Sriastradh */ 16345084c1b5Sriastradh int 16355084c1b5Sriastradh entropy_poll(int events) 16365084c1b5Sriastradh { 16375084c1b5Sriastradh int revents = 0; 16385084c1b5Sriastradh 16393586ae1dSriastradh KASSERT(!cold); 16405084c1b5Sriastradh 16415084c1b5Sriastradh /* Always ready for writing. */ 16425084c1b5Sriastradh revents |= events & (POLLOUT|POLLWRNORM); 16435084c1b5Sriastradh 16445084c1b5Sriastradh /* Narrow it down to reads. */ 16455084c1b5Sriastradh events &= POLLIN|POLLRDNORM; 16465084c1b5Sriastradh if (events == 0) 16475084c1b5Sriastradh return revents; 16485084c1b5Sriastradh 16495084c1b5Sriastradh /* 16505084c1b5Sriastradh * If we have reached full entropy and we're not depleting 16515084c1b5Sriastradh * entropy, we are forever ready. 16525084c1b5Sriastradh */ 165396b2c7deSriastradh if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0 || 165496b2c7deSriastradh atomic_load_relaxed(&E->samplesneeded) == 0) && 16555084c1b5Sriastradh __predict_true(!atomic_load_relaxed(&entropy_depletion))) 16565084c1b5Sriastradh return revents | events; 16575084c1b5Sriastradh 16585084c1b5Sriastradh /* 16595084c1b5Sriastradh * Otherwise, check whether we need entropy under the lock. If 16605084c1b5Sriastradh * we don't, we're ready; if we do, add ourselves to the queue. 
16615084c1b5Sriastradh */ 16625084c1b5Sriastradh mutex_enter(&E->lock); 166396b2c7deSriastradh if (E->bitsneeded == 0 || E->samplesneeded == 0) 16645084c1b5Sriastradh revents |= events; 16655084c1b5Sriastradh else 16665084c1b5Sriastradh selrecord(curlwp, &E->selq); 16675084c1b5Sriastradh mutex_exit(&E->lock); 16685084c1b5Sriastradh 16695084c1b5Sriastradh return revents; 16705084c1b5Sriastradh } 16715084c1b5Sriastradh 16725084c1b5Sriastradh /* 16735084c1b5Sriastradh * filt_entropy_read_detach(kn) 16745084c1b5Sriastradh * 16755084c1b5Sriastradh * struct filterops::f_detach callback for entropy read events: 16765084c1b5Sriastradh * remove kn from the list of waiters. 16775084c1b5Sriastradh */ 16785084c1b5Sriastradh static void 16795084c1b5Sriastradh filt_entropy_read_detach(struct knote *kn) 16805084c1b5Sriastradh { 16815084c1b5Sriastradh 16823586ae1dSriastradh KASSERT(!cold); 16835084c1b5Sriastradh 16845084c1b5Sriastradh mutex_enter(&E->lock); 16852ef9bcafSthorpej selremove_knote(&E->selq, kn); 16865084c1b5Sriastradh mutex_exit(&E->lock); 16875084c1b5Sriastradh } 16885084c1b5Sriastradh 16895084c1b5Sriastradh /* 16905084c1b5Sriastradh * filt_entropy_read_event(kn, hint) 16915084c1b5Sriastradh * 16925084c1b5Sriastradh * struct filterops::f_event callback for entropy read events: 16935084c1b5Sriastradh * poll for entropy. Caller must hold the global entropy lock if 16945084c1b5Sriastradh * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 16955084c1b5Sriastradh */ 16965084c1b5Sriastradh static int 16975084c1b5Sriastradh filt_entropy_read_event(struct knote *kn, long hint) 16985084c1b5Sriastradh { 16995084c1b5Sriastradh int ret; 17005084c1b5Sriastradh 17013586ae1dSriastradh KASSERT(!cold); 17025084c1b5Sriastradh 17035084c1b5Sriastradh /* Acquire the lock, if caller is outside entropy subsystem. */ 17045084c1b5Sriastradh if (hint == NOTE_SUBMIT) 17055084c1b5Sriastradh KASSERT(mutex_owned(&E->lock)); 17065084c1b5Sriastradh else 17075084c1b5Sriastradh mutex_enter(&E->lock); 17085084c1b5Sriastradh 17095084c1b5Sriastradh /* 17105084c1b5Sriastradh * If we still need entropy, can't read anything; if not, can 17115084c1b5Sriastradh * read arbitrarily much. 17125084c1b5Sriastradh */ 171396b2c7deSriastradh if (E->bitsneeded != 0 && E->samplesneeded != 0) { 17145084c1b5Sriastradh ret = 0; 17155084c1b5Sriastradh } else { 17165084c1b5Sriastradh if (atomic_load_relaxed(&entropy_depletion)) 1717a816c0f9Sriastradh kn->kn_data = ENTROPY_CAPACITY; /* bytes */ 17185084c1b5Sriastradh else 17195084c1b5Sriastradh kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 17205084c1b5Sriastradh ret = 1; 17215084c1b5Sriastradh } 17225084c1b5Sriastradh 17235084c1b5Sriastradh /* Release the lock, if caller is outside entropy subsystem. */ 17245084c1b5Sriastradh if (hint == NOTE_SUBMIT) 17255084c1b5Sriastradh KASSERT(mutex_owned(&E->lock)); 17265084c1b5Sriastradh else 17275084c1b5Sriastradh mutex_exit(&E->lock); 17285084c1b5Sriastradh 17295084c1b5Sriastradh return ret; 17305084c1b5Sriastradh } 17315084c1b5Sriastradh 1732f5455836Sthorpej /* XXX Makes sense only for /dev/u?random. 
*/ 17335084c1b5Sriastradh static const struct filterops entropy_read_filtops = { 1734f5455836Sthorpej .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 17355084c1b5Sriastradh .f_attach = NULL, 17365084c1b5Sriastradh .f_detach = filt_entropy_read_detach, 17375084c1b5Sriastradh .f_event = filt_entropy_read_event, 17385084c1b5Sriastradh }; 17395084c1b5Sriastradh 17405084c1b5Sriastradh /* 17415084c1b5Sriastradh * entropy_kqfilter(kn) 17425084c1b5Sriastradh * 17435084c1b5Sriastradh * Register kn to receive entropy event notifications. May be 17445084c1b5Sriastradh * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. 17455084c1b5Sriastradh */ 17465084c1b5Sriastradh int 17475084c1b5Sriastradh entropy_kqfilter(struct knote *kn) 17485084c1b5Sriastradh { 17495084c1b5Sriastradh 17503586ae1dSriastradh KASSERT(!cold); 17515084c1b5Sriastradh 17525084c1b5Sriastradh switch (kn->kn_filter) { 17535084c1b5Sriastradh case EVFILT_READ: 17545084c1b5Sriastradh /* Enter into the global select queue. */ 17555084c1b5Sriastradh mutex_enter(&E->lock); 17565084c1b5Sriastradh kn->kn_fop = &entropy_read_filtops; 17572ef9bcafSthorpej selrecord_knote(&E->selq, kn); 17585084c1b5Sriastradh mutex_exit(&E->lock); 17595084c1b5Sriastradh return 0; 17605084c1b5Sriastradh case EVFILT_WRITE: 17615084c1b5Sriastradh /* Can always dump entropy into the system. */ 17625084c1b5Sriastradh kn->kn_fop = &seltrue_filtops; 17635084c1b5Sriastradh return 0; 17645084c1b5Sriastradh default: 17655084c1b5Sriastradh return EINVAL; 17665084c1b5Sriastradh } 17675084c1b5Sriastradh } 17685084c1b5Sriastradh 17695084c1b5Sriastradh /* 17705084c1b5Sriastradh * rndsource_setcb(rs, get, getarg) 17715084c1b5Sriastradh * 17725084c1b5Sriastradh * Set the request callback for the entropy source rs, if it can 17735084c1b5Sriastradh * provide entropy on demand. Must precede rnd_attach_source. 17745084c1b5Sriastradh */ 17755084c1b5Sriastradh void 17765084c1b5Sriastradh rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *), 17775084c1b5Sriastradh void *getarg) 17785084c1b5Sriastradh { 17795084c1b5Sriastradh 17805084c1b5Sriastradh rs->get = get; 17815084c1b5Sriastradh rs->getarg = getarg; 17825084c1b5Sriastradh } 17835084c1b5Sriastradh 17845084c1b5Sriastradh /* 17855084c1b5Sriastradh * rnd_attach_source(rs, name, type, flags) 17865084c1b5Sriastradh * 17875084c1b5Sriastradh * Attach the entropy source rs. Must be done after 17885084c1b5Sriastradh * rndsource_setcb, if any, and before any calls to rnd_add_data. 17895084c1b5Sriastradh */ 17905084c1b5Sriastradh void 17915084c1b5Sriastradh rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type, 17925084c1b5Sriastradh uint32_t flags) 17935084c1b5Sriastradh { 17945084c1b5Sriastradh uint32_t extra[4]; 17955084c1b5Sriastradh unsigned i = 0; 17965084c1b5Sriastradh 179730c052bdSriastradh KASSERTMSG(name[0] != '\0', "rndsource must have nonempty name"); 179830c052bdSriastradh 17995084c1b5Sriastradh /* Grab cycle counter to mix extra into the pool. */ 18005084c1b5Sriastradh extra[i++] = entropy_timer(); 18015084c1b5Sriastradh 18025084c1b5Sriastradh /* 18035084c1b5Sriastradh * Apply some standard flags: 18045084c1b5Sriastradh * 18055084c1b5Sriastradh * - We do not bother with network devices by default, for 18065084c1b5Sriastradh * hysterical raisins (perhaps: because it is often the case 18075084c1b5Sriastradh * that an adversary can influence network packet timings). 
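 *
 * For illustration only (the mydev_* names are hypothetical and the
 * flag combination is just one plausible choice): a driver exposing
 * an on-demand hardware RNG might attach itself roughly like this,
 * with the callback set before attachment as required above:
 *
 *	static void
 *	mydev_get(size_t nbytes, void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *		uint32_t v;
 *
 *		while (nbytes) {
 *			v = mydev_read_rng(sc);
 *			rnd_add_data(&sc->sc_rndsource, &v, sizeof v, 32);
 *			nbytes -= MIN(nbytes, sizeof v);
 *		}
 *	}
 *
 *	rndsource_setcb(&sc->sc_rndsource, mydev_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, "mydev", RND_TYPE_RNG,
 *	    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);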
18085084c1b5Sriastradh */ 18095084c1b5Sriastradh switch (type) { 18105084c1b5Sriastradh case RND_TYPE_NET: 18115084c1b5Sriastradh flags |= RND_FLAG_NO_COLLECT; 18125084c1b5Sriastradh break; 18135084c1b5Sriastradh } 18145084c1b5Sriastradh 18155084c1b5Sriastradh /* Sanity-check the callback if RND_FLAG_HASCB is set. */ 18165084c1b5Sriastradh KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL); 18175084c1b5Sriastradh 18185084c1b5Sriastradh /* Initialize the random source. */ 18195084c1b5Sriastradh memset(rs->name, 0, sizeof(rs->name)); /* paranoia */ 18205084c1b5Sriastradh strlcpy(rs->name, name, sizeof(rs->name)); 18213f5d9c7dSriastradh memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 18223f5d9c7dSriastradh memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 18239881ae0bSriastradh rs->total = 0; 18245084c1b5Sriastradh rs->type = type; 18255084c1b5Sriastradh rs->flags = flags; 18263586ae1dSriastradh if (entropy_percpu != NULL) 18275084c1b5Sriastradh rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 18285084c1b5Sriastradh extra[i++] = entropy_timer(); 18295084c1b5Sriastradh 18305084c1b5Sriastradh /* Wire it into the global list of random sources. */ 18313586ae1dSriastradh if (__predict_true(!cold)) 18325084c1b5Sriastradh mutex_enter(&E->lock); 18335084c1b5Sriastradh LIST_INSERT_HEAD(&E->sources, rs, list); 18343586ae1dSriastradh if (__predict_true(!cold)) 18355084c1b5Sriastradh mutex_exit(&E->lock); 18365084c1b5Sriastradh extra[i++] = entropy_timer(); 18375084c1b5Sriastradh 18385084c1b5Sriastradh /* Request that it provide entropy ASAP, if we can. */ 18395084c1b5Sriastradh if (ISSET(flags, RND_FLAG_HASCB)) 18405084c1b5Sriastradh (*rs->get)(ENTROPY_CAPACITY, rs->getarg); 18415084c1b5Sriastradh extra[i++] = entropy_timer(); 18425084c1b5Sriastradh 18435084c1b5Sriastradh /* Mix the extra into the pool. */ 18445084c1b5Sriastradh KASSERT(i == __arraycount(extra)); 18453586ae1dSriastradh entropy_enter(extra, sizeof extra, 0, /*count*/__predict_true(!cold)); 18465084c1b5Sriastradh explicit_memset(extra, 0, sizeof extra); 18475084c1b5Sriastradh } 18485084c1b5Sriastradh 18495084c1b5Sriastradh /* 18505084c1b5Sriastradh * rnd_detach_source(rs) 18515084c1b5Sriastradh * 18525084c1b5Sriastradh * Detach the entropy source rs. May sleep waiting for users to 18535084c1b5Sriastradh * drain. Further use is not allowed. 18545084c1b5Sriastradh */ 18555084c1b5Sriastradh void 18565084c1b5Sriastradh rnd_detach_source(struct krndsource *rs) 18575084c1b5Sriastradh { 18585084c1b5Sriastradh 18595084c1b5Sriastradh /* 18605084c1b5Sriastradh * If we're cold (shouldn't happen, but hey), just remove it 18615084c1b5Sriastradh * from the list -- there's nothing allocated. 18625084c1b5Sriastradh */ 18633586ae1dSriastradh if (__predict_false(cold) && entropy_percpu == NULL) { 18645084c1b5Sriastradh LIST_REMOVE(rs, list); 18655084c1b5Sriastradh return; 18665084c1b5Sriastradh } 18675084c1b5Sriastradh 18685084c1b5Sriastradh /* We may have to wait for entropy_request. */ 18695084c1b5Sriastradh ASSERT_SLEEPABLE(); 18705084c1b5Sriastradh 1871708a423cSriastradh /* Wait until the source list is not in use, and remove it. */ 18725084c1b5Sriastradh mutex_enter(&E->lock); 1873708a423cSriastradh while (E->sourcelock) 187436a480a1Sriastradh cv_wait(&E->sourcelock_cv, &E->lock); 1875708a423cSriastradh LIST_REMOVE(rs, list); 18765084c1b5Sriastradh mutex_exit(&E->lock); 18775084c1b5Sriastradh 18785084c1b5Sriastradh /* Free the per-CPU data. 
*/ 18795084c1b5Sriastradh percpu_free(rs->state, sizeof(struct rndsource_cpu)); 18805084c1b5Sriastradh } 18815084c1b5Sriastradh 18825084c1b5Sriastradh /* 188389444d3fSriastradh * rnd_lock_sources(flags) 1884708a423cSriastradh * 188589444d3fSriastradh * Lock the list of entropy sources. Caller must hold the global 188689444d3fSriastradh * entropy lock. If successful, no rndsource will go away until 188789444d3fSriastradh * rnd_unlock_sources even while the caller releases the global 188889444d3fSriastradh * entropy lock. 188989444d3fSriastradh * 18903586ae1dSriastradh * May be called very early at boot, before entropy_init. 18913586ae1dSriastradh * 189289444d3fSriastradh * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 189389444d3fSriastradh * If flags & ENTROPY_SIG, allow interruption by signal. 1894708a423cSriastradh */ 189589444d3fSriastradh static int __attribute__((warn_unused_result)) 189689444d3fSriastradh rnd_lock_sources(int flags) 1897708a423cSriastradh { 1898708a423cSriastradh int error; 1899708a423cSriastradh 19003586ae1dSriastradh KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 19013586ae1dSriastradh KASSERT(!cpu_intr_p()); 1902708a423cSriastradh 1903708a423cSriastradh while (E->sourcelock) { 19043586ae1dSriastradh KASSERT(!cold); 190589444d3fSriastradh if (!ISSET(flags, ENTROPY_WAIT)) 190689444d3fSriastradh return EWOULDBLOCK; 190789444d3fSriastradh if (ISSET(flags, ENTROPY_SIG)) { 190836a480a1Sriastradh error = cv_wait_sig(&E->sourcelock_cv, &E->lock); 1909708a423cSriastradh if (error) 1910708a423cSriastradh return error; 191189444d3fSriastradh } else { 191289444d3fSriastradh cv_wait(&E->sourcelock_cv, &E->lock); 191389444d3fSriastradh } 1914708a423cSriastradh } 1915708a423cSriastradh 1916708a423cSriastradh E->sourcelock = curlwp; 1917708a423cSriastradh return 0; 1918708a423cSriastradh } 1919708a423cSriastradh 1920708a423cSriastradh /* 1921708a423cSriastradh * rnd_unlock_sources() 1922708a423cSriastradh * 192389444d3fSriastradh * Unlock the list of sources after rnd_lock_sources. Caller must 192489444d3fSriastradh * hold the global entropy lock. 19253586ae1dSriastradh * 19263586ae1dSriastradh * May be called very early at boot, before entropy_init. 1927708a423cSriastradh */ 1928708a423cSriastradh static void 1929708a423cSriastradh rnd_unlock_sources(void) 1930708a423cSriastradh { 1931708a423cSriastradh 19323586ae1dSriastradh KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 19333586ae1dSriastradh KASSERT(!cpu_intr_p()); 1934708a423cSriastradh 19354f5c9c32Sriastradh KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p", 19364f5c9c32Sriastradh curlwp, E->sourcelock); 1937708a423cSriastradh E->sourcelock = NULL; 19383586ae1dSriastradh if (__predict_true(!cold)) 193936a480a1Sriastradh cv_signal(&E->sourcelock_cv); 1940708a423cSriastradh } 1941708a423cSriastradh 1942708a423cSriastradh /* 1943708a423cSriastradh * rnd_sources_locked() 1944708a423cSriastradh * 1945708a423cSriastradh * True if we hold the list of rndsources locked, for diagnostic 1946708a423cSriastradh * assertions. 19473586ae1dSriastradh * 19483586ae1dSriastradh * May be called very early at boot, before entropy_init. 
1949708a423cSriastradh */ 1950af167962Sriastradh static bool __diagused 1951708a423cSriastradh rnd_sources_locked(void) 1952708a423cSriastradh { 1953708a423cSriastradh 19544f5c9c32Sriastradh return E->sourcelock == curlwp; 1955708a423cSriastradh } 1956708a423cSriastradh 1957708a423cSriastradh /* 195889444d3fSriastradh * entropy_request(nbytes, flags) 19595084c1b5Sriastradh * 19605084c1b5Sriastradh * Request nbytes bytes of entropy from all sources in the system. 19615084c1b5Sriastradh * OK if we overdo it. Caller must hold the global entropy lock; 19625084c1b5Sriastradh * will release and re-acquire it. 196389444d3fSriastradh * 19643586ae1dSriastradh * May be called very early at boot, before entropy_init. 19653586ae1dSriastradh * 196689444d3fSriastradh * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 196789444d3fSriastradh * If flags & ENTROPY_SIG, allow interruption by signal. 19685084c1b5Sriastradh */ 196989444d3fSriastradh static int 197089444d3fSriastradh entropy_request(size_t nbytes, int flags) 19715084c1b5Sriastradh { 1972708a423cSriastradh struct krndsource *rs; 197389444d3fSriastradh int error; 19745084c1b5Sriastradh 19753586ae1dSriastradh KASSERT(__predict_false(cold) || mutex_owned(&E->lock)); 19763586ae1dSriastradh KASSERT(!cpu_intr_p()); 19773586ae1dSriastradh if ((flags & ENTROPY_WAIT) != 0 && __predict_false(!cold)) 197889444d3fSriastradh ASSERT_SLEEPABLE(); 19795084c1b5Sriastradh 19805084c1b5Sriastradh /* 198189444d3fSriastradh * Lock the list of entropy sources to block rnd_detach_source 198289444d3fSriastradh * until we're done, and to serialize calls to the entropy 198389444d3fSriastradh * callbacks as guaranteed to drivers. 19845084c1b5Sriastradh */ 198589444d3fSriastradh error = rnd_lock_sources(flags); 198689444d3fSriastradh if (error) 198789444d3fSriastradh return error; 19885084c1b5Sriastradh entropy_request_evcnt.ev_count++; 19895084c1b5Sriastradh 19905084c1b5Sriastradh /* Clamp to the maximum reasonable request. */ 19915084c1b5Sriastradh nbytes = MIN(nbytes, ENTROPY_CAPACITY); 19925084c1b5Sriastradh 19935084c1b5Sriastradh /* Walk the list of sources. */ 1994708a423cSriastradh LIST_FOREACH(rs, &E->sources, list) { 19955084c1b5Sriastradh /* Skip sources without callbacks. */ 19965084c1b5Sriastradh if (!ISSET(rs->flags, RND_FLAG_HASCB)) 19975084c1b5Sriastradh continue; 19985084c1b5Sriastradh 1999a3f52b6eSriastradh /* 2000a3f52b6eSriastradh * Skip sources that are disabled altogether -- we 2001a3f52b6eSriastradh * would just ignore their samples anyway. 2002a3f52b6eSriastradh */ 2003a3f52b6eSriastradh if (ISSET(rs->flags, RND_FLAG_NO_COLLECT)) 2004a3f52b6eSriastradh continue; 2005a3f52b6eSriastradh 20065084c1b5Sriastradh /* Drop the lock while we call the callback. */ 20073586ae1dSriastradh if (__predict_true(!cold)) 20085084c1b5Sriastradh mutex_exit(&E->lock); 20095084c1b5Sriastradh (*rs->get)(nbytes, rs->getarg); 20103586ae1dSriastradh if (__predict_true(!cold)) 20115084c1b5Sriastradh mutex_enter(&E->lock); 20125084c1b5Sriastradh } 20135084c1b5Sriastradh 201489444d3fSriastradh /* Request done; unlock the list of entropy sources. 
*/ 2015708a423cSriastradh rnd_unlock_sources(); 201689444d3fSriastradh return 0; 20175084c1b5Sriastradh } 20185084c1b5Sriastradh 201996b2c7deSriastradh static inline uint32_t 202096b2c7deSriastradh rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta) 202196b2c7deSriastradh { 202296b2c7deSriastradh int32_t delta2, delta3; 202396b2c7deSriastradh 202496b2c7deSriastradh /* 202596b2c7deSriastradh * Calculate the second and third order differentials 202696b2c7deSriastradh */ 202796b2c7deSriastradh delta2 = d->dx - delta; 202896b2c7deSriastradh if (delta2 < 0) 202996b2c7deSriastradh delta2 = -delta2; /* XXX arithmetic overflow */ 203096b2c7deSriastradh 203196b2c7deSriastradh delta3 = d->d2x - delta2; 203296b2c7deSriastradh if (delta3 < 0) 203396b2c7deSriastradh delta3 = -delta3; /* XXX arithmetic overflow */ 203496b2c7deSriastradh 203596b2c7deSriastradh d->x = v; 203696b2c7deSriastradh d->dx = delta; 203796b2c7deSriastradh d->d2x = delta2; 203896b2c7deSriastradh 203996b2c7deSriastradh /* 204096b2c7deSriastradh * If any delta is 0, we got no entropy. If all are non-zero, we 204196b2c7deSriastradh * might have something. 204296b2c7deSriastradh */ 204396b2c7deSriastradh if (delta == 0 || delta2 == 0 || delta3 == 0) 204496b2c7deSriastradh return 0; 204596b2c7deSriastradh 204696b2c7deSriastradh return 1; 204796b2c7deSriastradh } 204896b2c7deSriastradh 204996b2c7deSriastradh static inline uint32_t 205096b2c7deSriastradh rnd_dt_estimate(struct krndsource *rs, uint32_t t) 205196b2c7deSriastradh { 205296b2c7deSriastradh int32_t delta; 205396b2c7deSriastradh uint32_t ret; 205496b2c7deSriastradh rnd_delta_t *d; 205596b2c7deSriastradh struct rndsource_cpu *rc; 205696b2c7deSriastradh 205796b2c7deSriastradh rc = percpu_getref(rs->state); 205896b2c7deSriastradh d = &rc->rc_timedelta; 205996b2c7deSriastradh 206096b2c7deSriastradh if (t < d->x) { 206196b2c7deSriastradh delta = UINT32_MAX - d->x + t; 206296b2c7deSriastradh } else { 206396b2c7deSriastradh delta = d->x - t; 206496b2c7deSriastradh } 206596b2c7deSriastradh 206696b2c7deSriastradh if (delta < 0) { 206796b2c7deSriastradh delta = -delta; /* XXX arithmetic overflow */ 206896b2c7deSriastradh } 206996b2c7deSriastradh 207096b2c7deSriastradh ret = rnd_delta_estimate(d, t, delta); 207196b2c7deSriastradh 207296b2c7deSriastradh KASSERT(d->x == t); 207396b2c7deSriastradh KASSERT(d->dx == delta); 207496b2c7deSriastradh percpu_putref(rs->state); 207596b2c7deSriastradh return ret; 207696b2c7deSriastradh } 207796b2c7deSriastradh 20785084c1b5Sriastradh /* 20795084c1b5Sriastradh * rnd_add_uint32(rs, value) 20805084c1b5Sriastradh * 20815084c1b5Sriastradh * Enter 32 bits of data from an entropy source into the pool. 20825084c1b5Sriastradh * 20833586ae1dSriastradh * May be called from any context or with spin locks held, but may 20843586ae1dSriastradh * drop data. 20855084c1b5Sriastradh * 20863586ae1dSriastradh * This is meant for cheaply taking samples from devices that 20873586ae1dSriastradh * aren't designed to be hardware random number generators. 
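 *
 * For illustration only (the mydev_* names are hypothetical): a
 * driver might feed its completion interrupts to the pool like so,
 * letting the time-delta estimator above decide whether the samples
 * count:
 *
 *	static int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		... handle the device interrupt ...
 *		rnd_add_uint32(&sc->sc_rndsource, mydev_status(sc));
 *		return 1;
 *	}
 *
 * The value itself is credited no entropy (entropybits is zero);
 * only the timing of the call can earn a sample credit, and only if
 * the source was attached with RND_FLAG_ESTIMATE_TIME and not
 * RND_FLAG_NO_ESTIMATE.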
20885084c1b5Sriastradh */ 20895084c1b5Sriastradh void 20905084c1b5Sriastradh rnd_add_uint32(struct krndsource *rs, uint32_t value) 20915084c1b5Sriastradh { 20923586ae1dSriastradh bool intr_p = true; 20935084c1b5Sriastradh 20943586ae1dSriastradh rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 20955084c1b5Sriastradh } 20965084c1b5Sriastradh 20975084c1b5Sriastradh void 20985084c1b5Sriastradh _rnd_add_uint32(struct krndsource *rs, uint32_t value) 20995084c1b5Sriastradh { 21003586ae1dSriastradh bool intr_p = true; 21015084c1b5Sriastradh 21023586ae1dSriastradh rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 21035084c1b5Sriastradh } 21045084c1b5Sriastradh 21055084c1b5Sriastradh void 21065084c1b5Sriastradh _rnd_add_uint64(struct krndsource *rs, uint64_t value) 21075084c1b5Sriastradh { 21083586ae1dSriastradh bool intr_p = true; 21095084c1b5Sriastradh 21103586ae1dSriastradh rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p); 21115084c1b5Sriastradh } 21125084c1b5Sriastradh 21135084c1b5Sriastradh /* 21145084c1b5Sriastradh * rnd_add_data(rs, buf, len, entropybits) 21155084c1b5Sriastradh * 21165084c1b5Sriastradh * Enter data from an entropy source into the pool, with a 21175084c1b5Sriastradh * driver's estimate of how much entropy the physical source of 21185084c1b5Sriastradh * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 21195084c1b5Sriastradh * estimate and treat it as zero. 21205084c1b5Sriastradh * 21213586ae1dSriastradh * rs MAY but SHOULD NOT be NULL. If rs is NULL, MUST NOT be 21223586ae1dSriastradh * called from interrupt context or with spin locks held. 21235084c1b5Sriastradh * 21243586ae1dSriastradh * If rs is non-NULL, MAY but SHOULD NOT be called from interrupt 21253586ae1dSriastradh * context, in which case act like rnd_add_data_intr -- if the 21263586ae1dSriastradh * sample buffer is full, schedule a softint and drop any 21273586ae1dSriastradh * additional data on the floor. (This may change later once we 21283586ae1dSriastradh * fix drivers that still call this from interrupt context to use 21293586ae1dSriastradh * rnd_add_data_intr instead.) MUST NOT be called with spin locks 21303586ae1dSriastradh * held if not in hard interrupt context -- i.e., MUST NOT be 21313586ae1dSriastradh * called in thread context or softint context with spin locks 21323586ae1dSriastradh * held. 21335084c1b5Sriastradh */ 21345084c1b5Sriastradh void 21355084c1b5Sriastradh rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len, 21365084c1b5Sriastradh uint32_t entropybits) 21375084c1b5Sriastradh { 21383586ae1dSriastradh bool intr_p = cpu_intr_p(); /* XXX make this unconditionally false */ 21395084c1b5Sriastradh 21403586ae1dSriastradh /* 21413586ae1dSriastradh * Weird legacy exception that we should rip out and replace by 21423586ae1dSriastradh * creating new rndsources to attribute entropy to the callers: 21433586ae1dSriastradh * If there's no rndsource, just enter the data and time now. 21443586ae1dSriastradh */ 21453586ae1dSriastradh if (rs == NULL) { 21463586ae1dSriastradh uint32_t extra; 21473586ae1dSriastradh 21483586ae1dSriastradh KASSERT(!intr_p); 21495084c1b5Sriastradh KASSERTMSG(howmany(entropybits, NBBY) <= len, 21505084c1b5Sriastradh "%s: impossible entropy rate:" 21515084c1b5Sriastradh " %"PRIu32" bits in %"PRIu32"-byte string", 21525084c1b5Sriastradh rs ? 
rs->name : "(anonymous)", entropybits, len); 215396b2c7deSriastradh entropy_enter(buf, len, entropybits, /*count*/false); 21545084c1b5Sriastradh extra = entropy_timer(); 215596b2c7deSriastradh entropy_enter(&extra, sizeof extra, 0, /*count*/false); 21565084c1b5Sriastradh explicit_memset(&extra, 0, sizeof extra); 21575084c1b5Sriastradh return; 21585084c1b5Sriastradh } 21595084c1b5Sriastradh 21603586ae1dSriastradh rnd_add_data_internal(rs, buf, len, entropybits, intr_p); 21613586ae1dSriastradh } 21623586ae1dSriastradh 21633586ae1dSriastradh /* 21643586ae1dSriastradh * rnd_add_data_intr(rs, buf, len, entropybits) 21653586ae1dSriastradh * 21663586ae1dSriastradh * Try to enter data from an entropy source into the pool, with a 21673586ae1dSriastradh * driver's estimate of how much entropy the physical source of 21683586ae1dSriastradh * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 21693586ae1dSriastradh * estimate and treat it as zero. If the sample buffer is full, 21703586ae1dSriastradh * schedule a softint and drop any additional data on the floor. 21713586ae1dSriastradh */ 21723586ae1dSriastradh void 21733586ae1dSriastradh rnd_add_data_intr(struct krndsource *rs, const void *buf, uint32_t len, 21743586ae1dSriastradh uint32_t entropybits) 21753586ae1dSriastradh { 21763586ae1dSriastradh bool intr_p = true; 21773586ae1dSriastradh 21783586ae1dSriastradh rnd_add_data_internal(rs, buf, len, entropybits, intr_p); 21793586ae1dSriastradh } 21803586ae1dSriastradh 21813586ae1dSriastradh /* 21823586ae1dSriastradh * rnd_add_data_internal(rs, buf, len, entropybits, intr_p) 21833586ae1dSriastradh * 21843586ae1dSriastradh * Internal subroutine to decide whether or not to enter data or 21853586ae1dSriastradh * timing for a particular rndsource, and if so, to enter it. 21863586ae1dSriastradh * 21873586ae1dSriastradh * intr_p is true for callers from interrupt context or spin locks 21883586ae1dSriastradh * held, and false for callers from thread or soft interrupt 21893586ae1dSriastradh * context and no spin locks held. 21903586ae1dSriastradh */ 21913586ae1dSriastradh static void 21923586ae1dSriastradh rnd_add_data_internal(struct krndsource *rs, const void *buf, uint32_t len, 21933586ae1dSriastradh uint32_t entropybits, bool intr_p) 21943586ae1dSriastradh { 21953586ae1dSriastradh uint32_t flags; 21963586ae1dSriastradh 21973586ae1dSriastradh KASSERTMSG(howmany(entropybits, NBBY) <= len, 21983586ae1dSriastradh "%s: impossible entropy rate:" 21993586ae1dSriastradh " %"PRIu32" bits in %"PRIu32"-byte string", 22003586ae1dSriastradh rs ? rs->name : "(anonymous)", entropybits, len); 22013586ae1dSriastradh 220228811d5eSriastradh /* 220328811d5eSriastradh * Hold up the reset xcall before it zeroes the entropy counts 220428811d5eSriastradh * on this CPU or globally. Otherwise, we might leave some 220528811d5eSriastradh * nonzero entropy attributed to an untrusted source in the 220628811d5eSriastradh * event of a race with a change to flags. 220728811d5eSriastradh */ 220828811d5eSriastradh kpreempt_disable(); 220928811d5eSriastradh 22105084c1b5Sriastradh /* Load a snapshot of the flags. Ioctl may change them under us. */ 22115084c1b5Sriastradh flags = atomic_load_relaxed(&rs->flags); 22125084c1b5Sriastradh 22135084c1b5Sriastradh /* 22145084c1b5Sriastradh * Skip if: 22155084c1b5Sriastradh * - we're not collecting entropy, or 22165084c1b5Sriastradh * - the operator doesn't want to collect entropy from this, or 22175084c1b5Sriastradh * - neither data nor timings are being collected from this. 
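 *
 * For example (illustrative combinations, not an exhaustive list): a
 * source attached with RND_FLAG_COLLECT_TIME|RND_FLAG_NO_ESTIMATE
 * contributes only a timestamp per call, never credited as entropy;
 * one attached with RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME|
 * RND_FLAG_ESTIMATE_TIME enters both the data and a timestamp, and
 * rnd_dt_estimate decides whether the timestamp earns a sample
 * credit.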
22185084c1b5Sriastradh */ 22195084c1b5Sriastradh if (!atomic_load_relaxed(&entropy_collection) || 22205084c1b5Sriastradh ISSET(flags, RND_FLAG_NO_COLLECT) || 22215084c1b5Sriastradh !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME)) 222228811d5eSriastradh goto out; 22235084c1b5Sriastradh 22245084c1b5Sriastradh /* If asked, ignore the estimate. */ 22255084c1b5Sriastradh if (ISSET(flags, RND_FLAG_NO_ESTIMATE)) 22265084c1b5Sriastradh entropybits = 0; 22275084c1b5Sriastradh 22285084c1b5Sriastradh /* If we are collecting data, enter them. */ 222996b2c7deSriastradh if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) { 223096b2c7deSriastradh rnd_add_data_1(rs, buf, len, entropybits, /*count*/false, 22313586ae1dSriastradh RND_FLAG_COLLECT_VALUE, intr_p); 223296b2c7deSriastradh } 22335084c1b5Sriastradh 22345084c1b5Sriastradh /* If we are collecting timings, enter one. */ 22355084c1b5Sriastradh if (ISSET(flags, RND_FLAG_COLLECT_TIME)) { 22363586ae1dSriastradh uint32_t extra; 223796b2c7deSriastradh bool count; 223896b2c7deSriastradh 223996b2c7deSriastradh /* Sample a timer. */ 22405084c1b5Sriastradh extra = entropy_timer(); 224196b2c7deSriastradh 224296b2c7deSriastradh /* If asked, do entropy estimation on the time. */ 224396b2c7deSriastradh if ((flags & (RND_FLAG_ESTIMATE_TIME|RND_FLAG_NO_ESTIMATE)) == 22443586ae1dSriastradh RND_FLAG_ESTIMATE_TIME && __predict_true(!cold)) 224596b2c7deSriastradh count = rnd_dt_estimate(rs, extra); 224696b2c7deSriastradh else 224796b2c7deSriastradh count = false; 224896b2c7deSriastradh 224996b2c7deSriastradh rnd_add_data_1(rs, &extra, sizeof extra, 0, count, 22503586ae1dSriastradh RND_FLAG_COLLECT_TIME, intr_p); 22515084c1b5Sriastradh } 225228811d5eSriastradh 225328811d5eSriastradh out: /* Allow concurrent changes to flags to finish. */ 225428811d5eSriastradh kpreempt_enable(); 22555084c1b5Sriastradh } 22565084c1b5Sriastradh 22573f5d9c7dSriastradh static unsigned 22583f5d9c7dSriastradh add_sat(unsigned a, unsigned b) 22593f5d9c7dSriastradh { 22603f5d9c7dSriastradh unsigned c = a + b; 22613f5d9c7dSriastradh 22623f5d9c7dSriastradh return (c < a ? UINT_MAX : c); 22633f5d9c7dSriastradh } 22643f5d9c7dSriastradh 22655084c1b5Sriastradh /* 226696b2c7deSriastradh * rnd_add_data_1(rs, buf, len, entropybits, count, flag) 22675084c1b5Sriastradh * 22685084c1b5Sriastradh * Internal subroutine to call either entropy_enter_intr, if we're 22695084c1b5Sriastradh * in interrupt context, or entropy_enter if not, and to count the 22705084c1b5Sriastradh * entropy in an rndsource. 22715084c1b5Sriastradh */ 22725084c1b5Sriastradh static void 22735084c1b5Sriastradh rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len, 22743586ae1dSriastradh uint32_t entropybits, bool count, uint32_t flag, bool intr_p) 22755084c1b5Sriastradh { 22765084c1b5Sriastradh bool fullyused; 22775084c1b5Sriastradh 22785084c1b5Sriastradh /* 22793586ae1dSriastradh * For the interrupt-like path, use entropy_enter_intr and take 22803586ae1dSriastradh * note of whether it consumed the full sample; otherwise, use 22813586ae1dSriastradh * entropy_enter, which always consumes the full sample. 
22825084c1b5Sriastradh */ 22833586ae1dSriastradh if (intr_p) { 228496b2c7deSriastradh fullyused = entropy_enter_intr(buf, len, entropybits, count); 22855084c1b5Sriastradh } else { 228696b2c7deSriastradh entropy_enter(buf, len, entropybits, count); 22875084c1b5Sriastradh fullyused = true; 22885084c1b5Sriastradh } 22895084c1b5Sriastradh 22905084c1b5Sriastradh /* 22915084c1b5Sriastradh * If we used the full sample, note how many bits were 22925084c1b5Sriastradh * contributed from this source. 22935084c1b5Sriastradh */ 22945084c1b5Sriastradh if (fullyused) { 22953586ae1dSriastradh if (__predict_false(cold)) { 22963586ae1dSriastradh const int s = splhigh(); 22973f5d9c7dSriastradh rs->total = add_sat(rs->total, entropybits); 22983f5d9c7dSriastradh switch (flag) { 22993f5d9c7dSriastradh case RND_FLAG_COLLECT_TIME: 23003f5d9c7dSriastradh rs->time_delta.insamples = 23013f5d9c7dSriastradh add_sat(rs->time_delta.insamples, 1); 23023f5d9c7dSriastradh break; 23033f5d9c7dSriastradh case RND_FLAG_COLLECT_VALUE: 23043f5d9c7dSriastradh rs->value_delta.insamples = 23053f5d9c7dSriastradh add_sat(rs->value_delta.insamples, 1); 23063f5d9c7dSriastradh break; 23073f5d9c7dSriastradh } 23083586ae1dSriastradh splx(s); 23095084c1b5Sriastradh } else { 23105084c1b5Sriastradh struct rndsource_cpu *rc = percpu_getref(rs->state); 23115084c1b5Sriastradh 23123f5d9c7dSriastradh atomic_store_relaxed(&rc->rc_entropybits, 23133f5d9c7dSriastradh add_sat(rc->rc_entropybits, entropybits)); 23143f5d9c7dSriastradh switch (flag) { 23153f5d9c7dSriastradh case RND_FLAG_COLLECT_TIME: 23163f5d9c7dSriastradh atomic_store_relaxed(&rc->rc_timesamples, 23173f5d9c7dSriastradh add_sat(rc->rc_timesamples, 1)); 23183f5d9c7dSriastradh break; 23193f5d9c7dSriastradh case RND_FLAG_COLLECT_VALUE: 23203f5d9c7dSriastradh atomic_store_relaxed(&rc->rc_datasamples, 23213f5d9c7dSriastradh add_sat(rc->rc_datasamples, 1)); 23223f5d9c7dSriastradh break; 23233f5d9c7dSriastradh } 23245084c1b5Sriastradh percpu_putref(rs->state); 23255084c1b5Sriastradh } 23265084c1b5Sriastradh } 23275084c1b5Sriastradh } 23285084c1b5Sriastradh 23295084c1b5Sriastradh /* 23305084c1b5Sriastradh * rnd_add_data_sync(rs, buf, len, entropybits) 23315084c1b5Sriastradh * 23325084c1b5Sriastradh * Same as rnd_add_data. Originally used in rndsource callbacks, 23335084c1b5Sriastradh * to break an unnecessary cycle; no longer really needed. 23345084c1b5Sriastradh */ 23355084c1b5Sriastradh void 23365084c1b5Sriastradh rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len, 23375084c1b5Sriastradh uint32_t entropybits) 23385084c1b5Sriastradh { 23395084c1b5Sriastradh 23405084c1b5Sriastradh rnd_add_data(rs, buf, len, entropybits); 23415084c1b5Sriastradh } 23425084c1b5Sriastradh 23435084c1b5Sriastradh /* 23445084c1b5Sriastradh * rndsource_entropybits(rs) 23455084c1b5Sriastradh * 23465084c1b5Sriastradh * Return approximately the number of bits of entropy that have 23475084c1b5Sriastradh * been contributed via rs so far. Approximate if other CPUs may 23485084c1b5Sriastradh * be calling rnd_add_data concurrently. 
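 *
 * `Approximate' because the per-CPU counters are read with relaxed
 * atomics while other CPUs may still be adding to them; the MIN
 * clamp in the per-CPU helper only keeps the total from wrapping
 * past UINT_MAX.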
23495084c1b5Sriastradh */ 23505084c1b5Sriastradh static unsigned 23515084c1b5Sriastradh rndsource_entropybits(struct krndsource *rs) 23525084c1b5Sriastradh { 23535084c1b5Sriastradh unsigned nbits = rs->total; 23545084c1b5Sriastradh 23553586ae1dSriastradh KASSERT(!cold); 2356708a423cSriastradh KASSERT(rnd_sources_locked()); 23575084c1b5Sriastradh percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits); 23585084c1b5Sriastradh return nbits; 23595084c1b5Sriastradh } 23605084c1b5Sriastradh 23615084c1b5Sriastradh static void 23625084c1b5Sriastradh rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci) 23635084c1b5Sriastradh { 23645084c1b5Sriastradh struct rndsource_cpu *rc = ptr; 23655084c1b5Sriastradh unsigned *nbitsp = cookie; 23665084c1b5Sriastradh unsigned cpu_nbits; 23675084c1b5Sriastradh 23683f5d9c7dSriastradh cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits); 23695084c1b5Sriastradh *nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits); 23705084c1b5Sriastradh } 23715084c1b5Sriastradh 23725084c1b5Sriastradh /* 23735084c1b5Sriastradh * rndsource_to_user(rs, urs) 23745084c1b5Sriastradh * 23755084c1b5Sriastradh * Copy a description of rs out to urs for userland. 23765084c1b5Sriastradh */ 23775084c1b5Sriastradh static void 23785084c1b5Sriastradh rndsource_to_user(struct krndsource *rs, rndsource_t *urs) 23795084c1b5Sriastradh { 23805084c1b5Sriastradh 23813586ae1dSriastradh KASSERT(!cold); 2382708a423cSriastradh KASSERT(rnd_sources_locked()); 23835084c1b5Sriastradh 23845084c1b5Sriastradh /* Avoid kernel memory disclosure. */ 23855084c1b5Sriastradh memset(urs, 0, sizeof(*urs)); 23865084c1b5Sriastradh 23875084c1b5Sriastradh CTASSERT(sizeof(urs->name) == sizeof(rs->name)); 23885084c1b5Sriastradh strlcpy(urs->name, rs->name, sizeof(urs->name)); 23895084c1b5Sriastradh urs->total = rndsource_entropybits(rs); 23905084c1b5Sriastradh urs->type = rs->type; 23915084c1b5Sriastradh urs->flags = atomic_load_relaxed(&rs->flags); 23925084c1b5Sriastradh } 23935084c1b5Sriastradh 23945084c1b5Sriastradh /* 23955084c1b5Sriastradh * rndsource_to_user_est(rs, urse) 23965084c1b5Sriastradh * 23975084c1b5Sriastradh * Copy a description of rs and estimation statistics out to urse 23985084c1b5Sriastradh * for userland. 23995084c1b5Sriastradh */ 24005084c1b5Sriastradh static void 24015084c1b5Sriastradh rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse) 24025084c1b5Sriastradh { 24035084c1b5Sriastradh 24043586ae1dSriastradh KASSERT(!cold); 2405708a423cSriastradh KASSERT(rnd_sources_locked()); 24065084c1b5Sriastradh 24075084c1b5Sriastradh /* Avoid kernel memory disclosure. */ 24085084c1b5Sriastradh memset(urse, 0, sizeof(*urse)); 24095084c1b5Sriastradh 24105084c1b5Sriastradh /* Copy out the rndsource description. */ 24115084c1b5Sriastradh rndsource_to_user(rs, &urse->rt); 24125084c1b5Sriastradh 24133f5d9c7dSriastradh /* Gather the statistics. 
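 * The delta sample counts accumulate the boot-time tallies plus the
 * per-CPU counters below; dt_total is left at zero and dv_total
 * simply mirrors the source's total entropy count.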
*/ 24143f5d9c7dSriastradh urse->dt_samples = rs->time_delta.insamples; 24155084c1b5Sriastradh urse->dt_total = 0; 24163f5d9c7dSriastradh urse->dv_samples = rs->value_delta.insamples; 24173f5d9c7dSriastradh urse->dv_total = urse->rt.total; 24183f5d9c7dSriastradh percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse); 24193f5d9c7dSriastradh } 24203f5d9c7dSriastradh 24213f5d9c7dSriastradh static void 24223f5d9c7dSriastradh rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci) 24233f5d9c7dSriastradh { 24243f5d9c7dSriastradh struct rndsource_cpu *rc = ptr; 24253f5d9c7dSriastradh rndsource_est_t *urse = cookie; 24263f5d9c7dSriastradh 24273f5d9c7dSriastradh urse->dt_samples = add_sat(urse->dt_samples, 24283f5d9c7dSriastradh atomic_load_relaxed(&rc->rc_timesamples)); 24293f5d9c7dSriastradh urse->dv_samples = add_sat(urse->dv_samples, 24303f5d9c7dSriastradh atomic_load_relaxed(&rc->rc_datasamples)); 24315084c1b5Sriastradh } 24325084c1b5Sriastradh 24335084c1b5Sriastradh /* 24345b753169Sriastradh * entropy_reset_xc(arg1, arg2) 24355b753169Sriastradh * 24365b753169Sriastradh * Reset the current CPU's pending entropy to zero. 24375b753169Sriastradh */ 24385b753169Sriastradh static void 24395b753169Sriastradh entropy_reset_xc(void *arg1 __unused, void *arg2 __unused) 24405b753169Sriastradh { 24415b753169Sriastradh uint32_t extra = entropy_timer(); 2442450311ecSriastradh struct entropy_cpu_lock lock; 24435b753169Sriastradh struct entropy_cpu *ec; 24445b753169Sriastradh 24455b753169Sriastradh /* 2446450311ecSriastradh * With the per-CPU state locked, zero the pending count and 2447450311ecSriastradh * enter a cycle count for fun. 24485b753169Sriastradh */ 2449450311ecSriastradh ec = entropy_cpu_get(&lock); 245096b2c7deSriastradh ec->ec_bitspending = 0; 245196b2c7deSriastradh ec->ec_samplespending = 0; 24525b753169Sriastradh entpool_enter(ec->ec_pool, &extra, sizeof extra); 2453450311ecSriastradh entropy_cpu_put(&lock, ec); 24545b753169Sriastradh } 24555b753169Sriastradh 24565b753169Sriastradh /* 245745b27d01Sriastradh * entropy_reset() 245845b27d01Sriastradh * 245945b27d01Sriastradh * Assume the entropy pool has been exposed, e.g. because the VM 246045b27d01Sriastradh * has been cloned. Nix all the pending entropy and set the 246145b27d01Sriastradh * needed to maximum. 246245b27d01Sriastradh */ 246345b27d01Sriastradh void 246445b27d01Sriastradh entropy_reset(void) 246545b27d01Sriastradh { 246645b27d01Sriastradh 246745b27d01Sriastradh xc_broadcast(0, &entropy_reset_xc, NULL, NULL); 246845b27d01Sriastradh mutex_enter(&E->lock); 246945b27d01Sriastradh E->bitspending = 0; 247045b27d01Sriastradh E->samplespending = 0; 247145b27d01Sriastradh atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS); 247245b27d01Sriastradh atomic_store_relaxed(&E->samplesneeded, MINSAMPLES); 247345b27d01Sriastradh E->consolidate = false; 247445b27d01Sriastradh mutex_exit(&E->lock); 247545b27d01Sriastradh } 247645b27d01Sriastradh 247745b27d01Sriastradh /* 24785084c1b5Sriastradh * entropy_ioctl(cmd, data) 24795084c1b5Sriastradh * 24805084c1b5Sriastradh * Handle various /dev/random ioctl queries. 24815084c1b5Sriastradh */ 24825084c1b5Sriastradh int 24835084c1b5Sriastradh entropy_ioctl(unsigned long cmd, void *data) 24845084c1b5Sriastradh { 24855084c1b5Sriastradh struct krndsource *rs; 24865084c1b5Sriastradh bool privileged; 24875084c1b5Sriastradh int error; 24885084c1b5Sriastradh 24893586ae1dSriastradh KASSERT(!cold); 24905084c1b5Sriastradh 24915084c1b5Sriastradh /* Verify user's authorization to perform the ioctl. 
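 * Read-style queries require KAUTH_DEVICE_RND_GETPRIV, RNDCTL
 * requires KAUTH_DEVICE_RND_SETPRIV, and RNDADDDATA requires
 * KAUTH_DEVICE_RND_ADDDATA, plus KAUTH_DEVICE_RND_ADDDATA_ESTIMATE
 * for the caller's entropy estimate to be believed.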
*/ 24925084c1b5Sriastradh switch (cmd) { 24935084c1b5Sriastradh case RNDGETENTCNT: 24945084c1b5Sriastradh case RNDGETPOOLSTAT: 24955084c1b5Sriastradh case RNDGETSRCNUM: 24965084c1b5Sriastradh case RNDGETSRCNAME: 24975084c1b5Sriastradh case RNDGETESTNUM: 24985084c1b5Sriastradh case RNDGETESTNAME: 2499813a709dSchristos error = kauth_authorize_device(kauth_cred_get(), 25005084c1b5Sriastradh KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); 25015084c1b5Sriastradh break; 25025084c1b5Sriastradh case RNDCTL: 2503813a709dSchristos error = kauth_authorize_device(kauth_cred_get(), 25045084c1b5Sriastradh KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); 25055084c1b5Sriastradh break; 25065084c1b5Sriastradh case RNDADDDATA: 2507813a709dSchristos error = kauth_authorize_device(kauth_cred_get(), 25085084c1b5Sriastradh KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); 25095084c1b5Sriastradh /* Ascertain whether the user's inputs should be counted. */ 2510813a709dSchristos if (kauth_authorize_device(kauth_cred_get(), 25115084c1b5Sriastradh KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, 25125084c1b5Sriastradh NULL, NULL, NULL, NULL) == 0) 25135084c1b5Sriastradh privileged = true; 25145084c1b5Sriastradh break; 25155084c1b5Sriastradh default: { 25165084c1b5Sriastradh /* 25175084c1b5Sriastradh * XXX Hack to avoid changing module ABI so this can be 25185084c1b5Sriastradh * pulled up. Later, we can just remove the argument. 25195084c1b5Sriastradh */ 25205084c1b5Sriastradh static const struct fileops fops = { 25215084c1b5Sriastradh .fo_ioctl = rnd_system_ioctl, 25225084c1b5Sriastradh }; 25235084c1b5Sriastradh struct file f = { 25245084c1b5Sriastradh .f_ops = &fops, 25255084c1b5Sriastradh }; 25265084c1b5Sriastradh MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data), 25275084c1b5Sriastradh enosys(), error); 25285084c1b5Sriastradh #if defined(_LP64) 25295084c1b5Sriastradh if (error == ENOSYS) 25305084c1b5Sriastradh MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data), 25315084c1b5Sriastradh enosys(), error); 25325084c1b5Sriastradh #endif 25335084c1b5Sriastradh if (error == ENOSYS) 25345084c1b5Sriastradh error = ENOTTY; 25355084c1b5Sriastradh break; 25365084c1b5Sriastradh } 25375084c1b5Sriastradh } 25385084c1b5Sriastradh 25395084c1b5Sriastradh /* If anything went wrong with authorization, stop here. */ 25405084c1b5Sriastradh if (error) 25415084c1b5Sriastradh return error; 25425084c1b5Sriastradh 25435084c1b5Sriastradh /* Dispatch on the command. */ 25445084c1b5Sriastradh switch (cmd) { 25455084c1b5Sriastradh case RNDGETENTCNT: { /* Get current entropy count in bits. */ 25465084c1b5Sriastradh uint32_t *countp = data; 25475084c1b5Sriastradh 25485084c1b5Sriastradh mutex_enter(&E->lock); 254996b2c7deSriastradh *countp = MINENTROPYBITS - E->bitsneeded; 25505084c1b5Sriastradh mutex_exit(&E->lock); 25515084c1b5Sriastradh 25525084c1b5Sriastradh break; 25535084c1b5Sriastradh } 25545084c1b5Sriastradh case RNDGETPOOLSTAT: { /* Get entropy pool statistics. 
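 * (Units vary by field, as noted inline: pool size in 32-bit words,
 * threshold in bytes, entropy counts in bits.)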
*/ 25555084c1b5Sriastradh rndpoolstat_t *pstat = data; 25565084c1b5Sriastradh 25575084c1b5Sriastradh mutex_enter(&E->lock); 25585084c1b5Sriastradh 25595084c1b5Sriastradh /* parameters */ 25605084c1b5Sriastradh pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */ 256196b2c7deSriastradh pstat->threshold = MINENTROPYBITS/NBBY; /* bytes */ 25625084c1b5Sriastradh pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */ 25635084c1b5Sriastradh 25645084c1b5Sriastradh /* state */ 25655084c1b5Sriastradh pstat->added = 0; /* XXX total entropy_enter count */ 256696b2c7deSriastradh pstat->curentropy = MINENTROPYBITS - E->bitsneeded; /* bits */ 25675084c1b5Sriastradh pstat->removed = 0; /* XXX total entropy_extract count */ 25685084c1b5Sriastradh pstat->discarded = 0; /* XXX bits of entropy beyond capacity */ 256996b2c7deSriastradh 257096b2c7deSriastradh /* 257196b2c7deSriastradh * This used to be bits of data fabricated in some 257296b2c7deSriastradh * sense; we'll take it to mean number of samples, 257396b2c7deSriastradh * excluding the bits of entropy from HWRNG or seed. 257496b2c7deSriastradh */ 257596b2c7deSriastradh pstat->generated = MINSAMPLES - E->samplesneeded; 257696b2c7deSriastradh pstat->generated -= MIN(pstat->generated, pstat->curentropy); 25775084c1b5Sriastradh 25785084c1b5Sriastradh mutex_exit(&E->lock); 25795084c1b5Sriastradh break; 25805084c1b5Sriastradh } 25815084c1b5Sriastradh case RNDGETSRCNUM: { /* Get entropy sources by number. */ 25825084c1b5Sriastradh rndstat_t *stat = data; 25835084c1b5Sriastradh uint32_t start = 0, i = 0; 25845084c1b5Sriastradh 25855084c1b5Sriastradh /* Skip if none requested; fail if too many requested. */ 25865084c1b5Sriastradh if (stat->count == 0) 25875084c1b5Sriastradh break; 25885084c1b5Sriastradh if (stat->count > RND_MAXSTATCOUNT) 25895084c1b5Sriastradh return EINVAL; 25905084c1b5Sriastradh 25915084c1b5Sriastradh /* 25925084c1b5Sriastradh * Under the lock, find the first one, copy out as many 25935084c1b5Sriastradh * as requested, and report how many we copied out. 25945084c1b5Sriastradh */ 25955084c1b5Sriastradh mutex_enter(&E->lock); 259689444d3fSriastradh error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2597708a423cSriastradh if (error) { 2598708a423cSriastradh mutex_exit(&E->lock); 2599708a423cSriastradh return error; 2600708a423cSriastradh } 26015084c1b5Sriastradh LIST_FOREACH(rs, &E->sources, list) { 26025084c1b5Sriastradh if (start++ == stat->start) 26035084c1b5Sriastradh break; 26045084c1b5Sriastradh } 26055084c1b5Sriastradh while (i < stat->count && rs != NULL) { 2606f36f13f0Sriastradh mutex_exit(&E->lock); 26075084c1b5Sriastradh rndsource_to_user(rs, &stat->source[i++]); 2608f36f13f0Sriastradh mutex_enter(&E->lock); 26095084c1b5Sriastradh rs = LIST_NEXT(rs, list); 26105084c1b5Sriastradh } 26115084c1b5Sriastradh KASSERT(i <= stat->count); 26125084c1b5Sriastradh stat->count = i; 2613708a423cSriastradh rnd_unlock_sources(); 26145084c1b5Sriastradh mutex_exit(&E->lock); 26155084c1b5Sriastradh break; 26165084c1b5Sriastradh } 26175084c1b5Sriastradh case RNDGETESTNUM: { /* Get sources and estimates by number. */ 26185084c1b5Sriastradh rndstat_est_t *estat = data; 26195084c1b5Sriastradh uint32_t start = 0, i = 0; 26205084c1b5Sriastradh 26215084c1b5Sriastradh /* Skip if none requested; fail if too many requested. 
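 * (As with RNDGETSRCNUM above, RND_MAXSTATCOUNT caps how many
 * entries a single call may copy out; callers page through the
 * source list by advancing the start index.)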
*/ 26225084c1b5Sriastradh if (estat->count == 0) 26235084c1b5Sriastradh break; 26245084c1b5Sriastradh if (estat->count > RND_MAXSTATCOUNT) 26255084c1b5Sriastradh return EINVAL; 26265084c1b5Sriastradh 26275084c1b5Sriastradh /* 26285084c1b5Sriastradh * Under the lock, find the first one, copy out as many 26295084c1b5Sriastradh * as requested, and report how many we copied out. 26305084c1b5Sriastradh */ 26315084c1b5Sriastradh mutex_enter(&E->lock); 263289444d3fSriastradh error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2633708a423cSriastradh if (error) { 2634708a423cSriastradh mutex_exit(&E->lock); 2635708a423cSriastradh return error; 2636708a423cSriastradh } 26375084c1b5Sriastradh LIST_FOREACH(rs, &E->sources, list) { 26385084c1b5Sriastradh if (start++ == estat->start) 26395084c1b5Sriastradh break; 26405084c1b5Sriastradh } 26415084c1b5Sriastradh while (i < estat->count && rs != NULL) { 2642708a423cSriastradh mutex_exit(&E->lock); 26435084c1b5Sriastradh rndsource_to_user_est(rs, &estat->source[i++]); 2644708a423cSriastradh mutex_enter(&E->lock); 26455084c1b5Sriastradh rs = LIST_NEXT(rs, list); 26465084c1b5Sriastradh } 26475084c1b5Sriastradh KASSERT(i <= estat->count); 26485084c1b5Sriastradh estat->count = i; 2649708a423cSriastradh rnd_unlock_sources(); 26505084c1b5Sriastradh mutex_exit(&E->lock); 26515084c1b5Sriastradh break; 26525084c1b5Sriastradh } 26535084c1b5Sriastradh case RNDGETSRCNAME: { /* Get entropy sources by name. */ 26545084c1b5Sriastradh rndstat_name_t *nstat = data; 26555084c1b5Sriastradh const size_t n = sizeof(rs->name); 26565084c1b5Sriastradh 26575084c1b5Sriastradh CTASSERT(sizeof(rs->name) == sizeof(nstat->name)); 26585084c1b5Sriastradh 26595084c1b5Sriastradh /* 26605084c1b5Sriastradh * Under the lock, search by name. If found, copy it 26615084c1b5Sriastradh * out; if not found, fail with ENOENT. 26625084c1b5Sriastradh */ 26635084c1b5Sriastradh mutex_enter(&E->lock); 266489444d3fSriastradh error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2665708a423cSriastradh if (error) { 2666708a423cSriastradh mutex_exit(&E->lock); 2667708a423cSriastradh return error; 2668708a423cSriastradh } 26695084c1b5Sriastradh LIST_FOREACH(rs, &E->sources, list) { 26705084c1b5Sriastradh if (strncmp(rs->name, nstat->name, n) == 0) 26715084c1b5Sriastradh break; 26725084c1b5Sriastradh } 2673708a423cSriastradh if (rs != NULL) { 2674708a423cSriastradh mutex_exit(&E->lock); 26755084c1b5Sriastradh rndsource_to_user(rs, &nstat->source); 2676708a423cSriastradh mutex_enter(&E->lock); 2677708a423cSriastradh } else { 26785084c1b5Sriastradh error = ENOENT; 2679708a423cSriastradh } 2680708a423cSriastradh rnd_unlock_sources(); 26815084c1b5Sriastradh mutex_exit(&E->lock); 26825084c1b5Sriastradh break; 26835084c1b5Sriastradh } 26845084c1b5Sriastradh case RNDGETESTNAME: { /* Get sources and estimates by name. */ 26855084c1b5Sriastradh rndstat_est_name_t *enstat = data; 26865084c1b5Sriastradh const size_t n = sizeof(rs->name); 26875084c1b5Sriastradh 26885084c1b5Sriastradh CTASSERT(sizeof(rs->name) == sizeof(enstat->name)); 26895084c1b5Sriastradh 26905084c1b5Sriastradh /* 26915084c1b5Sriastradh * Under the lock, search by name. If found, copy it 26925084c1b5Sriastradh * out; if not found, fail with ENOENT. 
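 * (E->lock is dropped around rndsource_to_user_est, which walks
 * per-CPU state, while rnd_lock_sources keeps the source from being
 * detached in the meantime.)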
26935084c1b5Sriastradh */ 26945084c1b5Sriastradh mutex_enter(&E->lock); 269589444d3fSriastradh error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG); 2696708a423cSriastradh if (error) { 2697708a423cSriastradh mutex_exit(&E->lock); 2698708a423cSriastradh return error; 2699708a423cSriastradh } 27005084c1b5Sriastradh LIST_FOREACH(rs, &E->sources, list) { 27015084c1b5Sriastradh if (strncmp(rs->name, enstat->name, n) == 0) 27025084c1b5Sriastradh break; 27035084c1b5Sriastradh } 2704708a423cSriastradh if (rs != NULL) { 2705708a423cSriastradh mutex_exit(&E->lock); 27065084c1b5Sriastradh rndsource_to_user_est(rs, &enstat->source); 2707708a423cSriastradh mutex_enter(&E->lock); 2708708a423cSriastradh } else { 27095084c1b5Sriastradh error = ENOENT; 2710708a423cSriastradh } 2711708a423cSriastradh rnd_unlock_sources(); 27125084c1b5Sriastradh mutex_exit(&E->lock); 27135084c1b5Sriastradh break; 27145084c1b5Sriastradh } 27155084c1b5Sriastradh case RNDCTL: { /* Modify entropy source flags. */ 27165084c1b5Sriastradh rndctl_t *rndctl = data; 27175084c1b5Sriastradh const size_t n = sizeof(rs->name); 27185b753169Sriastradh uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT; 27195084c1b5Sriastradh uint32_t flags; 27205b753169Sriastradh bool reset = false, request = false; 27215084c1b5Sriastradh 27225084c1b5Sriastradh CTASSERT(sizeof(rs->name) == sizeof(rndctl->name)); 27235084c1b5Sriastradh 27245084c1b5Sriastradh /* Whitelist the flags that the user can change. */ 27255084c1b5Sriastradh rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT; 27265084c1b5Sriastradh 27275084c1b5Sriastradh /* 27285084c1b5Sriastradh * For each matching rndsource, either by type if 27295084c1b5Sriastradh * specified or by name if not, set the masked flags. 27305084c1b5Sriastradh */ 27315084c1b5Sriastradh mutex_enter(&E->lock); 27325084c1b5Sriastradh LIST_FOREACH(rs, &E->sources, list) { 27335084c1b5Sriastradh if (rndctl->type != 0xff) { 27345084c1b5Sriastradh if (rs->type != rndctl->type) 27355084c1b5Sriastradh continue; 273630c052bdSriastradh } else if (rndctl->name[0] != '\0') { 27375084c1b5Sriastradh if (strncmp(rs->name, rndctl->name, n) != 0) 27385084c1b5Sriastradh continue; 27395084c1b5Sriastradh } 27405084c1b5Sriastradh flags = rs->flags & ~rndctl->mask; 27415084c1b5Sriastradh flags |= rndctl->flags & rndctl->mask; 27425b753169Sriastradh if ((rs->flags & resetflags) == 0 && 27435b753169Sriastradh (flags & resetflags) != 0) 27445b753169Sriastradh reset = true; 27455b753169Sriastradh if ((rs->flags ^ flags) & resetflags) 27465b753169Sriastradh request = true; 27475084c1b5Sriastradh atomic_store_relaxed(&rs->flags, flags); 27485084c1b5Sriastradh } 27495084c1b5Sriastradh mutex_exit(&E->lock); 27505b753169Sriastradh 27515b753169Sriastradh /* 27525b753169Sriastradh * If we disabled estimation or collection, nix all the 27535b753169Sriastradh * pending entropy and set needed to the maximum. 27545b753169Sriastradh */ 275545b27d01Sriastradh if (reset) 275645b27d01Sriastradh entropy_reset(); 27575b753169Sriastradh 27585b753169Sriastradh /* 27595b753169Sriastradh * If we changed any of the estimation or collection 27605b753169Sriastradh * flags, request new samples from everyone -- either 27615b753169Sriastradh * to make up for what we just lost, or to get new 27625b753169Sriastradh * samples from what we just added.
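 * (Userland reaches this path through rndctl(8), which selects
 * sources by name or by type and toggles the NO_COLLECT and
 * NO_ESTIMATE bits handled above.)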
276389444d3fSriastradh * 276489444d3fSriastradh * Failing on signal, while waiting for another process 276589444d3fSriastradh * to finish requesting entropy, is OK here even though 276689444d3fSriastradh * we have committed side effects, because this ioctl 276789444d3fSriastradh * command is idempotent, so repeating it is safe. 27685b753169Sriastradh */ 276945b27d01Sriastradh if (request) 277045b27d01Sriastradh error = entropy_gather(); 27715084c1b5Sriastradh break; 27725084c1b5Sriastradh } 27735084c1b5Sriastradh case RNDADDDATA: { /* Enter seed into entropy pool. */ 27745084c1b5Sriastradh rnddata_t *rdata = data; 27755084c1b5Sriastradh unsigned entropybits = 0; 27765084c1b5Sriastradh 27775084c1b5Sriastradh if (!atomic_load_relaxed(&entropy_collection)) 27785084c1b5Sriastradh break; /* thanks but no thanks */ 27795084c1b5Sriastradh if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY)) 27805084c1b5Sriastradh return EINVAL; 27815084c1b5Sriastradh 27825084c1b5Sriastradh /* 27835084c1b5Sriastradh * This ioctl serves as the userland alternative to a 27845084c1b5Sriastradh * bootloader-provided seed -- typically furnished by 27855084c1b5Sriastradh * /etc/rc.d/random_seed. We accept the user's entropy 27865084c1b5Sriastradh * claim only if 27875084c1b5Sriastradh * 27885084c1b5Sriastradh * (a) the user is privileged, and 27895084c1b5Sriastradh * (b) we have not entered a bootloader seed. 27905084c1b5Sriastradh * 27915084c1b5Sriastradh * under the assumption that the user may use this to 27925084c1b5Sriastradh * load a seed from disk that we have already loaded 27935084c1b5Sriastradh * from the bootloader, so we don't double-count it. 27945084c1b5Sriastradh */ 27959d3b7ca9Sriastradh if (privileged && rdata->entropy && rdata->len) { 27965084c1b5Sriastradh mutex_enter(&E->lock); 27975084c1b5Sriastradh if (!E->seeded) { 27985084c1b5Sriastradh entropybits = MIN(rdata->entropy, 27995084c1b5Sriastradh MIN(rdata->len, ENTROPY_CAPACITY)*NBBY); 28005084c1b5Sriastradh E->seeded = true; 28015084c1b5Sriastradh } 28025084c1b5Sriastradh mutex_exit(&E->lock); 28035084c1b5Sriastradh } 28045084c1b5Sriastradh 2805bbed1747Sriastradh /* Enter the data and consolidate entropy. */ 28065084c1b5Sriastradh rnd_add_data(&seed_rndsource, rdata->data, rdata->len, 28075084c1b5Sriastradh entropybits); 2808*4dd1804bSriastradh error = entropy_consolidate(); 28095084c1b5Sriastradh break; 28105084c1b5Sriastradh } 28115084c1b5Sriastradh default: 28125084c1b5Sriastradh error = ENOTTY; 28135084c1b5Sriastradh } 28145084c1b5Sriastradh 28155084c1b5Sriastradh /* Return any error that may have come up.
*/ 28165084c1b5Sriastradh return error; 28175084c1b5Sriastradh } 28185084c1b5Sriastradh 28195084c1b5Sriastradh /* Legacy entry points */ 28205084c1b5Sriastradh 28215084c1b5Sriastradh void 28225084c1b5Sriastradh rnd_seed(void *seed, size_t len) 28235084c1b5Sriastradh { 28245084c1b5Sriastradh 28255084c1b5Sriastradh if (len != sizeof(rndsave_t)) { 28265084c1b5Sriastradh printf("entropy: invalid seed length: %zu," 28275084c1b5Sriastradh " expected sizeof(rndsave_t) = %zu\n", 28285084c1b5Sriastradh len, sizeof(rndsave_t)); 28295084c1b5Sriastradh return; 28305084c1b5Sriastradh } 28315084c1b5Sriastradh entropy_seed(seed); 28325084c1b5Sriastradh } 28335084c1b5Sriastradh 28345084c1b5Sriastradh void 28355084c1b5Sriastradh rnd_init(void) 28365084c1b5Sriastradh { 28375084c1b5Sriastradh 28385084c1b5Sriastradh entropy_init(); 28395084c1b5Sriastradh } 28405084c1b5Sriastradh 28415084c1b5Sriastradh void 28425084c1b5Sriastradh rnd_init_softint(void) 28435084c1b5Sriastradh { 28445084c1b5Sriastradh 28455084c1b5Sriastradh entropy_init_late(); 2846e4ceb72eSriastradh entropy_bootrequest(); 28475084c1b5Sriastradh } 28485084c1b5Sriastradh 28495084c1b5Sriastradh int 28505084c1b5Sriastradh rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data) 28515084c1b5Sriastradh { 28525084c1b5Sriastradh 28535084c1b5Sriastradh return entropy_ioctl(cmd, data); 28545084c1b5Sriastradh } 2855
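/*
 * Example (illustrative sketch only, not compiled as part of this
 * file): a userland program reaches entropy_ioctl above through the
 * rnd(4) device nodes, e.g. /dev/urandom.  RNDGETENTCNT and the
 * other commands are defined in <sys/rndio.h>; error handling is
 * omitted for brevity.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/rndio.h>
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t bits;
 *		int fd;
 *
 *		fd = open("/dev/urandom", O_RDONLY);
 *		if (fd == -1)
 *			return 1;
 *		if (ioctl(fd, RNDGETENTCNT, &bits) == -1)
 *			return 1;
 *		printf("%u bits of entropy in the pool\n",
 *		    (unsigned)bits);
 *		close(fd);
 *		return 0;
 *	}
 */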