/*	$NetBSD: kern_entropy.c,v 1.54 2022/03/24 12:58:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* No entropy estimation based on the sample values, which is a
 *	  contradiction in terms and a potential source of side
 *	  channels.  It is the responsibility of the driver author to
 *	  study how predictable the physical source of input can ever
 *	  be, and to furnish a lower bound on the amount of entropy it
 *	  has.
71 * 72 * * Entropy depletion is available for testing (or if you're into 73 * that sort of thing), with sysctl -w kern.entropy.depletion=1; 74 * the logic to support it is small, to minimize chance of bugs. 75 */ 76 77 #include <sys/cdefs.h> 78 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.54 2022/03/24 12:58:56 riastradh Exp $"); 79 80 #include <sys/param.h> 81 #include <sys/types.h> 82 #include <sys/atomic.h> 83 #include <sys/compat_stub.h> 84 #include <sys/condvar.h> 85 #include <sys/cpu.h> 86 #include <sys/entropy.h> 87 #include <sys/errno.h> 88 #include <sys/evcnt.h> 89 #include <sys/event.h> 90 #include <sys/file.h> 91 #include <sys/intr.h> 92 #include <sys/kauth.h> 93 #include <sys/kernel.h> 94 #include <sys/kmem.h> 95 #include <sys/kthread.h> 96 #include <sys/lwp.h> 97 #include <sys/module_hook.h> 98 #include <sys/mutex.h> 99 #include <sys/percpu.h> 100 #include <sys/poll.h> 101 #include <sys/proc.h> 102 #include <sys/queue.h> 103 #include <sys/reboot.h> 104 #include <sys/rnd.h> /* legacy kernel API */ 105 #include <sys/rndio.h> /* userland ioctl interface */ 106 #include <sys/rndsource.h> /* kernel rndsource driver API */ 107 #include <sys/select.h> 108 #include <sys/selinfo.h> 109 #include <sys/sha1.h> /* for boot seed checksum */ 110 #include <sys/stdint.h> 111 #include <sys/sysctl.h> 112 #include <sys/syslog.h> 113 #include <sys/systm.h> 114 #include <sys/time.h> 115 #include <sys/xcall.h> 116 117 #include <lib/libkern/entpool.h> 118 119 #include <machine/limits.h> 120 121 #ifdef __HAVE_CPU_COUNTER 122 #include <machine/cpu_counter.h> 123 #endif 124 125 /* 126 * struct entropy_cpu 127 * 128 * Per-CPU entropy state. The pool is allocated separately 129 * because percpu(9) sometimes moves per-CPU objects around 130 * without zeroing them, which would lead to unwanted copies of 131 * sensitive secrets. The evcnt is allocated separately because 132 * evcnt(9) assumes it stays put in memory. 133 */ 134 struct entropy_cpu { 135 struct entropy_cpu_evcnt { 136 struct evcnt softint; 137 struct evcnt intrdrop; 138 struct evcnt intrtrunc; 139 } *ec_evcnt; 140 struct entpool *ec_pool; 141 unsigned ec_pending; 142 bool ec_locked; 143 }; 144 145 /* 146 * struct entropy_cpu_lock 147 * 148 * State for locking the per-CPU entropy state. 149 */ 150 struct entropy_cpu_lock { 151 int ecl_s; 152 uint64_t ecl_ncsw; 153 }; 154 155 /* 156 * struct rndsource_cpu 157 * 158 * Per-CPU rndsource state. 159 */ 160 struct rndsource_cpu { 161 unsigned rc_entropybits; 162 unsigned rc_timesamples; 163 unsigned rc_datasamples; 164 }; 165 166 /* 167 * entropy_global (a.k.a. E for short in this file) 168 * 169 * Global entropy state. Writes protected by the global lock. 170 * Some fields, marked (A), can be read outside the lock, and are 171 * maintained with atomic_load/store_relaxed. 
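/*
 * Illustrative sketch (hypothetical driver, not in tree): the point
 * above about entropy estimation is the driver author's contract.  A
 * driver for, say, an environmental sensor would study the part
 * offline and then claim a deliberately conservative lower bound --
 * here one bit per 32-bit reading -- using the interfaces defined
 * later in this file (rnd_attach_source, rnd_add_data):
 *
 *	struct xyztemp_softc {
 *		struct krndsource	sc_rndsource;
 *	} *sc;
 *
 *	rnd_attach_source(&sc->sc_rndsource, "xyztemp", RND_TYPE_ENV,
 *	    RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME);
 *
 *	uint32_t reading = xyztemp_read(sc);	(hypothetical helper)
 *	rnd_add_data(&sc->sc_rndsource, &reading, sizeof reading, 1);
 *
 * Claiming zero bits is always safe; the subsystem never estimates
 * entropy from the sample values themselves.
 */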
172 */ 173 struct { 174 kmutex_t lock; /* covers all global state */ 175 struct entpool pool; /* global pool for extraction */ 176 unsigned needed; /* (A) needed globally */ 177 unsigned pending; /* (A) pending in per-CPU pools */ 178 unsigned timestamp; /* (A) time of last consolidation */ 179 unsigned epoch; /* (A) changes when needed -> 0 */ 180 kcondvar_t cv; /* notifies state changes */ 181 struct selinfo selq; /* notifies needed -> 0 */ 182 struct lwp *sourcelock; /* lock on list of sources */ 183 kcondvar_t sourcelock_cv; /* notifies sourcelock release */ 184 LIST_HEAD(,krndsource) sources; /* list of entropy sources */ 185 enum entropy_stage { 186 ENTROPY_COLD = 0, /* single-threaded */ 187 ENTROPY_WARM, /* multi-threaded at boot before CPUs */ 188 ENTROPY_HOT, /* multi-threaded multi-CPU */ 189 } stage; 190 bool consolidate; /* kick thread to consolidate */ 191 bool seed_rndsource; /* true if seed source is attached */ 192 bool seeded; /* true if seed file already loaded */ 193 } entropy_global __cacheline_aligned = { 194 /* Fields that must be initialized when the kernel is loaded. */ 195 .needed = ENTROPY_CAPACITY*NBBY, 196 .epoch = (unsigned)-1, /* -1 means entropy never consolidated */ 197 .sources = LIST_HEAD_INITIALIZER(entropy_global.sources), 198 .stage = ENTROPY_COLD, 199 }; 200 201 #define E (&entropy_global) /* declutter */ 202 203 /* Read-mostly globals */ 204 static struct percpu *entropy_percpu __read_mostly; /* struct entropy_cpu */ 205 static void *entropy_sih __read_mostly; /* softint handler */ 206 static struct lwp *entropy_lwp __read_mostly; /* housekeeping thread */ 207 208 static struct krndsource seed_rndsource __read_mostly; 209 210 /* 211 * Event counters 212 * 213 * Must be careful with adding these because they can serve as 214 * side channels. 215 */ 216 static struct evcnt entropy_discretionary_evcnt = 217 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary"); 218 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt); 219 static struct evcnt entropy_immediate_evcnt = 220 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate"); 221 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt); 222 static struct evcnt entropy_partial_evcnt = 223 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial"); 224 EVCNT_ATTACH_STATIC(entropy_partial_evcnt); 225 static struct evcnt entropy_consolidate_evcnt = 226 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate"); 227 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt); 228 static struct evcnt entropy_extract_fail_evcnt = 229 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail"); 230 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt); 231 static struct evcnt entropy_request_evcnt = 232 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request"); 233 EVCNT_ATTACH_STATIC(entropy_request_evcnt); 234 static struct evcnt entropy_deplete_evcnt = 235 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete"); 236 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt); 237 static struct evcnt entropy_notify_evcnt = 238 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify"); 239 EVCNT_ATTACH_STATIC(entropy_notify_evcnt); 240 241 /* Sysctl knobs */ 242 static bool entropy_collection = 1; 243 static bool entropy_depletion = 0; /* Silly! 
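/*
 * Sketch of the (A) convention in entropy_global above (this mirrors
 * entropy_epoch() and entropy_account_cpu() below; it is not an
 * additional interface): writers update (A) fields under E->lock
 * using atomic_store_relaxed, and lock-free readers use
 * atomic_load_relaxed, e.g.
 *
 *	mutex_enter(&E->lock);
 *	atomic_store_relaxed(&E->needed, 0);		writer, under lock
 *	mutex_exit(&E->lock);
 *
 *	if (atomic_load_relaxed(&E->needed) == 0)	lock-free reader
 *		...
 */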
*/ 244 245 static const struct sysctlnode *entropy_sysctlroot; 246 static struct sysctllog *entropy_sysctllog; 247 248 /* Forward declarations */ 249 static void entropy_init_cpu(void *, void *, struct cpu_info *); 250 static void entropy_fini_cpu(void *, void *, struct cpu_info *); 251 static void entropy_account_cpu(struct entropy_cpu *); 252 static void entropy_enter(const void *, size_t, unsigned); 253 static bool entropy_enter_intr(const void *, size_t, unsigned); 254 static void entropy_softintr(void *); 255 static void entropy_thread(void *); 256 static uint32_t entropy_pending(void); 257 static void entropy_pending_cpu(void *, void *, struct cpu_info *); 258 static void entropy_do_consolidate(void); 259 static void entropy_consolidate_xc(void *, void *); 260 static void entropy_notify(void); 261 static int sysctl_entropy_consolidate(SYSCTLFN_ARGS); 262 static int sysctl_entropy_gather(SYSCTLFN_ARGS); 263 static void filt_entropy_read_detach(struct knote *); 264 static int filt_entropy_read_event(struct knote *, long); 265 static int entropy_request(size_t, int); 266 static void rnd_add_data_1(struct krndsource *, const void *, uint32_t, 267 uint32_t, uint32_t); 268 static unsigned rndsource_entropybits(struct krndsource *); 269 static void rndsource_entropybits_cpu(void *, void *, struct cpu_info *); 270 static void rndsource_to_user(struct krndsource *, rndsource_t *); 271 static void rndsource_to_user_est(struct krndsource *, rndsource_est_t *); 272 static void rndsource_to_user_est_cpu(void *, void *, struct cpu_info *); 273 274 /* 275 * entropy_timer() 276 * 277 * Cycle counter, time counter, or anything that changes a wee bit 278 * unpredictably. 279 */ 280 static inline uint32_t 281 entropy_timer(void) 282 { 283 struct bintime bt; 284 uint32_t v; 285 286 /* If we have a CPU cycle counter, use the low 32 bits. */ 287 #ifdef __HAVE_CPU_COUNTER 288 if (__predict_true(cpu_hascounter())) 289 return cpu_counter32(); 290 #endif /* __HAVE_CPU_COUNTER */ 291 292 /* If we're cold, tough. Can't binuptime while cold. */ 293 if (__predict_false(cold)) 294 return 0; 295 296 /* Fold the 128 bits of binuptime into 32 bits. */ 297 binuptime(&bt); 298 v = bt.frac; 299 v ^= bt.frac >> 32; 300 v ^= bt.sec; 301 v ^= bt.sec >> 32; 302 return v; 303 } 304 305 static void 306 attach_seed_rndsource(void) 307 { 308 309 /* 310 * First called no later than entropy_init, while we are still 311 * single-threaded, so no need for RUN_ONCE. 312 */ 313 if (E->stage >= ENTROPY_WARM || E->seed_rndsource) 314 return; 315 rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN, 316 RND_FLAG_COLLECT_VALUE); 317 E->seed_rndsource = true; 318 } 319 320 /* 321 * entropy_init() 322 * 323 * Initialize the entropy subsystem. Panic on failure. 324 * 325 * Requires percpu(9) and sysctl(9) to be initialized. 326 */ 327 static void 328 entropy_init(void) 329 { 330 uint32_t extra[2]; 331 struct krndsource *rs; 332 unsigned i = 0; 333 334 KASSERT(E->stage == ENTROPY_COLD); 335 336 /* Grab some cycle counts early at boot. */ 337 extra[i++] = entropy_timer(); 338 339 /* Run the entropy pool cryptography self-test. */ 340 if (entpool_selftest() == -1) 341 panic("entropy pool crypto self-test failed"); 342 343 /* Create the sysctl directory. */ 344 sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot, 345 CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy", 346 SYSCTL_DESCR("Entropy (random number sources) options"), 347 NULL, 0, NULL, 0, 348 CTL_KERN, CTL_CREATE, CTL_EOL); 349 350 /* Create the sysctl knobs. 
*/ 351 /* XXX These shouldn't be writable at securelevel>0. */ 352 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 353 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection", 354 SYSCTL_DESCR("Automatically collect entropy from hardware"), 355 NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL); 356 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 357 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion", 358 SYSCTL_DESCR("`Deplete' entropy pool when observed"), 359 NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL); 360 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 361 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate", 362 SYSCTL_DESCR("Trigger entropy consolidation now"), 363 sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL); 364 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 365 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather", 366 SYSCTL_DESCR("Trigger entropy gathering from sources now"), 367 sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL); 368 /* XXX These should maybe not be readable at securelevel>0. */ 369 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 370 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 371 "needed", SYSCTL_DESCR("Systemwide entropy deficit"), 372 NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL); 373 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 374 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 375 "pending", SYSCTL_DESCR("Entropy pending on CPUs"), 376 NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL); 377 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 378 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 379 "epoch", SYSCTL_DESCR("Entropy epoch"), 380 NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL); 381 382 /* Initialize the global state for multithreaded operation. */ 383 mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL); 384 cv_init(&E->cv, "entropy"); 385 selinit(&E->selq); 386 cv_init(&E->sourcelock_cv, "entsrclock"); 387 388 /* Make sure the seed source is attached. */ 389 attach_seed_rndsource(); 390 391 /* Note if the bootloader didn't provide a seed. */ 392 if (!E->seeded) 393 aprint_debug("entropy: no seed from bootloader\n"); 394 395 /* Allocate the per-CPU records for all early entropy sources. */ 396 LIST_FOREACH(rs, &E->sources, list) 397 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 398 399 /* Allocate and initialize the per-CPU state. */ 400 entropy_percpu = percpu_create(sizeof(struct entropy_cpu), 401 entropy_init_cpu, entropy_fini_cpu, NULL); 402 403 /* Enter the boot cycle count to get started. */ 404 extra[i++] = entropy_timer(); 405 KASSERT(i == __arraycount(extra)); 406 entropy_enter(extra, sizeof extra, 0); 407 explicit_memset(extra, 0, sizeof extra); 408 409 /* We are now ready for multi-threaded operation. */ 410 E->stage = ENTROPY_WARM; 411 } 412 413 static void 414 entropy_init_late_cpu(void *a, void *b) 415 { 416 int bound; 417 418 /* 419 * We're not necessarily in a softint lwp here (xc_broadcast 420 * triggers softint on other CPUs, but calls directly on this 421 * CPU), so explicitly bind to the current CPU to invoke the 422 * softintr -- this lets us have a simpler assertion in 423 * entropy_account_cpu. Not necessary to avoid migration 424 * because xc_broadcast disables kpreemption anyway, but it 425 * doesn't hurt. 
426 */ 427 bound = curlwp_bind(); 428 entropy_softintr(NULL); 429 curlwp_bindx(bound); 430 } 431 432 /* 433 * entropy_init_late() 434 * 435 * Late initialization. Panic on failure. 436 * 437 * Requires CPUs to have been detected and LWPs to have started. 438 */ 439 static void 440 entropy_init_late(void) 441 { 442 void *sih; 443 int error; 444 445 KASSERT(E->stage == ENTROPY_WARM); 446 447 /* 448 * Establish the softint at the highest softint priority level. 449 * Must happen after CPU detection. 450 */ 451 sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, 452 &entropy_softintr, NULL); 453 if (sih == NULL) 454 panic("unable to establish entropy softint"); 455 456 /* 457 * Create the entropy housekeeping thread. Must happen after 458 * lwpinit. 459 */ 460 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL, 461 entropy_thread, NULL, &entropy_lwp, "entbutler"); 462 if (error) 463 panic("unable to create entropy housekeeping thread: %d", 464 error); 465 466 /* 467 * Wait until the per-CPU initialization has hit all CPUs 468 * before proceeding to mark the entropy system hot and 469 * enabling use of the softint. 470 */ 471 xc_barrier(XC_HIGHPRI); 472 E->stage = ENTROPY_HOT; 473 atomic_store_relaxed(&entropy_sih, sih); 474 475 /* 476 * At this point, entering new samples from interrupt handlers 477 * will trigger the softint to process them. But there may be 478 * some samples that were entered from interrupt handlers 479 * before the softint was available. Make sure we process 480 * those samples on all CPUs by running the softint logic on 481 * all CPUs. 482 */ 483 xc_wait(xc_broadcast(XC_HIGHPRI, entropy_init_late_cpu, NULL, NULL)); 484 } 485 486 /* 487 * entropy_init_cpu(ptr, cookie, ci) 488 * 489 * percpu(9) constructor for per-CPU entropy pool. 490 */ 491 static void 492 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci) 493 { 494 struct entropy_cpu *ec = ptr; 495 const char *cpuname; 496 497 ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP); 498 ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP); 499 ec->ec_pending = 0; 500 ec->ec_locked = false; 501 502 /* XXX ci_cpuname may not be initialized early enough. */ 503 cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname; 504 evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL, 505 cpuname, "entropy softint"); 506 evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL, 507 cpuname, "entropy intrdrop"); 508 evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL, 509 cpuname, "entropy intrtrunc"); 510 } 511 512 /* 513 * entropy_fini_cpu(ptr, cookie, ci) 514 * 515 * percpu(9) destructor for per-CPU entropy pool. 516 */ 517 static void 518 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci) 519 { 520 struct entropy_cpu *ec = ptr; 521 522 /* 523 * Zero any lingering data. Disclosure of the per-CPU pool 524 * shouldn't retroactively affect the security of any keys 525 * generated, because entpool(9) erases whatever we have just 526 * drawn out of any pool, but better safe than sorry. 527 */ 528 explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool)); 529 530 evcnt_detach(&ec->ec_evcnt->intrtrunc); 531 evcnt_detach(&ec->ec_evcnt->intrdrop); 532 evcnt_detach(&ec->ec_evcnt->softint); 533 534 kmem_free(ec->ec_pool, sizeof(*ec->ec_pool)); 535 kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt)); 536 } 537 538 /* 539 * ec = entropy_cpu_get(&lock) 540 * entropy_cpu_put(&lock, ec) 541 * 542 * Lock and unlock the per-CPU entropy state. 
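/*
 * Usage sketch for entropy_cpu_get/entropy_cpu_put (this is the
 * pattern entropy_enter and entropy_softintr below follow; it is not
 * a separate interface):
 *
 *	struct entropy_cpu_lock lock;
 *	struct entropy_cpu *ec;
 *
 *	ec = entropy_cpu_get(&lock);
 *	... touch ec->ec_pool and ec->ec_pending, on this CPU only ...
 *	entropy_cpu_put(&lock, ec);
 */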
This only prevents 543 * access on the same CPU -- by hard interrupts, by soft 544 * interrupts, or by other threads. 545 * 546 * Blocks soft interrupts and preemption altogether; doesn't block 547 * hard interrupts, but causes samples in hard interrupts to be 548 * dropped. 549 */ 550 static struct entropy_cpu * 551 entropy_cpu_get(struct entropy_cpu_lock *lock) 552 { 553 struct entropy_cpu *ec; 554 555 ec = percpu_getref(entropy_percpu); 556 lock->ecl_s = splsoftserial(); 557 KASSERT(!ec->ec_locked); 558 ec->ec_locked = true; 559 lock->ecl_ncsw = curlwp->l_ncsw; 560 __insn_barrier(); 561 562 return ec; 563 } 564 565 static void 566 entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec) 567 { 568 569 KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu())); 570 KASSERT(ec->ec_locked); 571 572 __insn_barrier(); 573 KASSERT(lock->ecl_ncsw == curlwp->l_ncsw); 574 ec->ec_locked = false; 575 splx(lock->ecl_s); 576 percpu_putref(entropy_percpu); 577 } 578 579 /* 580 * entropy_seed(seed) 581 * 582 * Seed the entropy pool with seed. Meant to be called as early 583 * as possible by the bootloader; may be called before or after 584 * entropy_init. Must be called before system reaches userland. 585 * Must be called in thread or soft interrupt context, not in hard 586 * interrupt context. Must be called at most once. 587 * 588 * Overwrites the seed in place. Caller may then free the memory. 589 */ 590 static void 591 entropy_seed(rndsave_t *seed) 592 { 593 SHA1_CTX ctx; 594 uint8_t digest[SHA1_DIGEST_LENGTH]; 595 bool seeded; 596 597 /* 598 * Verify the checksum. If the checksum fails, take the data 599 * but ignore the entropy estimate -- the file may have been 600 * incompletely written with garbage, which is harmless to add 601 * but may not be as unpredictable as alleged. 602 */ 603 SHA1Init(&ctx); 604 SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy)); 605 SHA1Update(&ctx, seed->data, sizeof(seed->data)); 606 SHA1Final(digest, &ctx); 607 CTASSERT(sizeof(seed->digest) == sizeof(digest)); 608 if (!consttime_memequal(digest, seed->digest, sizeof(digest))) { 609 printf("entropy: invalid seed checksum\n"); 610 seed->entropy = 0; 611 } 612 explicit_memset(&ctx, 0, sizeof ctx); 613 explicit_memset(digest, 0, sizeof digest); 614 615 /* 616 * If the entropy is insensibly large, try byte-swapping. 617 * Otherwise assume the file is corrupted and act as though it 618 * has zero entropy. 619 */ 620 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) { 621 seed->entropy = bswap32(seed->entropy); 622 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) 623 seed->entropy = 0; 624 } 625 626 /* Make sure the seed source is attached. */ 627 attach_seed_rndsource(); 628 629 /* Test and set E->seeded. */ 630 if (E->stage >= ENTROPY_WARM) 631 mutex_enter(&E->lock); 632 seeded = E->seeded; 633 E->seeded = (seed->entropy > 0); 634 if (E->stage >= ENTROPY_WARM) 635 mutex_exit(&E->lock); 636 637 /* 638 * If we've been seeded, may be re-entering the same seed 639 * (e.g., bootloader vs module init, or something). No harm in 640 * entering it twice, but it contributes no additional entropy. 641 */ 642 if (seeded) { 643 printf("entropy: double-seeded by bootloader\n"); 644 seed->entropy = 0; 645 } else { 646 printf("entropy: entering seed from bootloader" 647 " with %u bits of entropy\n", (unsigned)seed->entropy); 648 } 649 650 /* Enter it into the pool and promptly zero it. 
*/ 651 rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data), 652 seed->entropy); 653 explicit_memset(seed, 0, sizeof(*seed)); 654 } 655 656 /* 657 * entropy_bootrequest() 658 * 659 * Request entropy from all sources at boot, once config is 660 * complete and interrupts are running. 661 */ 662 void 663 entropy_bootrequest(void) 664 { 665 int error; 666 667 KASSERT(E->stage >= ENTROPY_WARM); 668 669 /* 670 * Request enough to satisfy the maximum entropy shortage. 671 * This is harmless overkill if the bootloader provided a seed. 672 */ 673 mutex_enter(&E->lock); 674 error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT); 675 KASSERT(error == 0); 676 mutex_exit(&E->lock); 677 } 678 679 /* 680 * entropy_epoch() 681 * 682 * Returns the current entropy epoch. If this changes, you should 683 * reseed. If -1, means system entropy has not yet reached full 684 * entropy or been explicitly consolidated; never reverts back to 685 * -1. Never zero, so you can always use zero as an uninitialized 686 * sentinel value meaning `reseed ASAP'. 687 * 688 * Usage model: 689 * 690 * struct foo { 691 * struct crypto_prng prng; 692 * unsigned epoch; 693 * } *foo; 694 * 695 * unsigned epoch = entropy_epoch(); 696 * if (__predict_false(epoch != foo->epoch)) { 697 * uint8_t seed[32]; 698 * if (entropy_extract(seed, sizeof seed, 0) != 0) 699 * warn("no entropy"); 700 * crypto_prng_reseed(&foo->prng, seed, sizeof seed); 701 * foo->epoch = epoch; 702 * } 703 */ 704 unsigned 705 entropy_epoch(void) 706 { 707 708 /* 709 * Unsigned int, so no need for seqlock for an atomic read, but 710 * make sure we read it afresh each time. 711 */ 712 return atomic_load_relaxed(&E->epoch); 713 } 714 715 /* 716 * entropy_ready() 717 * 718 * True if the entropy pool has full entropy. 719 */ 720 bool 721 entropy_ready(void) 722 { 723 724 return atomic_load_relaxed(&E->needed) == 0; 725 } 726 727 /* 728 * entropy_account_cpu(ec) 729 * 730 * Consider whether to consolidate entropy into the global pool 731 * after we just added some into the current CPU's pending pool. 732 * 733 * - If this CPU can provide enough entropy now, do so. 734 * 735 * - If this and whatever else is available on other CPUs can 736 * provide enough entropy, kick the consolidation thread. 737 * 738 * - Otherwise, do as little as possible, except maybe consolidate 739 * entropy at most once a minute. 740 * 741 * Caller must be bound to a CPU and therefore have exclusive 742 * access to ec. Will acquire and release the global lock. 743 */ 744 static void 745 entropy_account_cpu(struct entropy_cpu *ec) 746 { 747 struct entropy_cpu_lock lock; 748 struct entropy_cpu *ec0; 749 unsigned diff; 750 751 KASSERT(E->stage >= ENTROPY_WARM); 752 KASSERT(curlwp->l_pflag & LP_BOUND); 753 754 /* 755 * If there's no entropy needed, and entropy has been 756 * consolidated in the last minute, do nothing. 757 */ 758 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 759 __predict_true(!atomic_load_relaxed(&entropy_depletion)) && 760 __predict_true((time_uptime - E->timestamp) <= 60)) 761 return; 762 763 /* 764 * Consider consolidation, under the global lock and with the 765 * per-CPU state locked. 766 */ 767 mutex_enter(&E->lock); 768 ec0 = entropy_cpu_get(&lock); 769 KASSERT(ec0 == ec); 770 if (ec->ec_pending == 0) { 771 /* Raced with consolidation xcall. Nothing to do. */ 772 } else if (E->needed != 0 && E->needed <= ec->ec_pending) { 773 /* 774 * If we have not yet attained full entropy but we can 775 * now, do so. 
This way we disseminate entropy 776 * promptly when it becomes available early at boot; 777 * otherwise we leave it to the entropy consolidation 778 * thread, which is rate-limited to mitigate side 779 * channels and abuse. 780 */ 781 uint8_t buf[ENTPOOL_CAPACITY]; 782 783 /* Transfer from the local pool to the global pool. */ 784 entpool_extract(ec->ec_pool, buf, sizeof buf); 785 entpool_enter(&E->pool, buf, sizeof buf); 786 atomic_store_relaxed(&ec->ec_pending, 0); 787 atomic_store_relaxed(&E->needed, 0); 788 789 /* Notify waiters that we now have full entropy. */ 790 entropy_notify(); 791 entropy_immediate_evcnt.ev_count++; 792 } else { 793 /* Determine how much we can add to the global pool. */ 794 KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY, 795 "E->pending=%u", E->pending); 796 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending); 797 798 /* 799 * This should make a difference unless we are already 800 * saturated. 801 */ 802 KASSERTMSG(diff || E->pending == ENTROPY_CAPACITY*NBBY, 803 "diff=%u E->pending=%u ec->ec_pending=%u cap=%u", 804 diff, E->pending, ec->ec_pending, 805 (unsigned)ENTROPY_CAPACITY*NBBY); 806 807 /* Add to the global, subtract from the local. */ 808 E->pending += diff; 809 KASSERT(E->pending); 810 KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY, 811 "E->pending=%u", E->pending); 812 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff); 813 814 if (E->needed <= E->pending) { 815 /* 816 * Enough entropy between all the per-CPU 817 * pools. Wake up the housekeeping thread. 818 * 819 * If we don't need any entropy, this doesn't 820 * mean much, but it is the only time we ever 821 * gather additional entropy in case the 822 * accounting has been overly optimistic. This 823 * happens at most once a minute, so there's 824 * negligible performance cost. 825 */ 826 E->consolidate = true; 827 cv_broadcast(&E->cv); 828 if (E->needed == 0) 829 entropy_discretionary_evcnt.ev_count++; 830 } else { 831 /* Can't get full entropy. Keep gathering. */ 832 entropy_partial_evcnt.ev_count++; 833 } 834 } 835 entropy_cpu_put(&lock, ec); 836 mutex_exit(&E->lock); 837 } 838 839 /* 840 * entropy_enter_early(buf, len, nbits) 841 * 842 * Do entropy bookkeeping globally, before we have established 843 * per-CPU pools. Enter directly into the global pool in the hope 844 * that we enter enough before the first entropy_extract to thwart 845 * iterative-guessing attacks; entropy_extract will warn if not. 846 */ 847 static void 848 entropy_enter_early(const void *buf, size_t len, unsigned nbits) 849 { 850 bool notify = false; 851 852 KASSERT(E->stage == ENTROPY_COLD); 853 854 /* Enter it into the pool. */ 855 entpool_enter(&E->pool, buf, len); 856 857 /* 858 * Decide whether to notify reseed -- we will do so if either: 859 * (a) we transition from partial entropy to full entropy, or 860 * (b) we get a batch of full entropy all at once. 861 */ 862 notify |= (E->needed && E->needed <= nbits); 863 notify |= (nbits >= ENTROPY_CAPACITY*NBBY); 864 865 /* Subtract from the needed count and notify if appropriate. */ 866 E->needed -= MIN(E->needed, nbits); 867 if (notify) { 868 entropy_notify(); 869 entropy_immediate_evcnt.ev_count++; 870 } 871 } 872 873 /* 874 * entropy_enter(buf, len, nbits) 875 * 876 * Enter len bytes of data from buf into the system's entropy 877 * pool, stirring as necessary when the internal buffer fills up. 878 * nbits is a lower bound on the number of bits of entropy in the 879 * process that led to this sample. 
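/*
 * For example (sketch; the caller and the timestamp source are
 * hypothetical): code entering an 8-byte timestamp it only partly
 * trusts might claim a single bit:
 *
 *	uint64_t t = xyz_read_timestamp();
 *	entropy_enter(&t, sizeof t, 1);
 *
 * The KASSERTMSG in the function below enforces the sanity bound
 * howmany(nbits, NBBY) <= len, i.e. a caller can never claim more
 * than NBBY bits of entropy per byte of data entered.
 */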
880 */ 881 static void 882 entropy_enter(const void *buf, size_t len, unsigned nbits) 883 { 884 struct entropy_cpu_lock lock; 885 struct entropy_cpu *ec; 886 unsigned pending; 887 int bound; 888 889 KASSERTMSG(!cpu_intr_p(), 890 "use entropy_enter_intr from interrupt context"); 891 KASSERTMSG(howmany(nbits, NBBY) <= len, 892 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 893 894 /* If it's too early after boot, just use entropy_enter_early. */ 895 if (__predict_false(E->stage == ENTROPY_COLD)) { 896 entropy_enter_early(buf, len, nbits); 897 return; 898 } 899 900 /* 901 * Bind ourselves to the current CPU so we don't switch CPUs 902 * between entering data into the current CPU's pool (and 903 * updating the pending count) and transferring it to the 904 * global pool in entropy_account_cpu. 905 */ 906 bound = curlwp_bind(); 907 908 /* 909 * With the per-CPU state locked, enter into the per-CPU pool 910 * and count up what we can add. 911 */ 912 ec = entropy_cpu_get(&lock); 913 entpool_enter(ec->ec_pool, buf, len); 914 pending = ec->ec_pending; 915 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 916 atomic_store_relaxed(&ec->ec_pending, pending); 917 entropy_cpu_put(&lock, ec); 918 919 /* Consolidate globally if appropriate based on what we added. */ 920 if (pending) 921 entropy_account_cpu(ec); 922 923 curlwp_bindx(bound); 924 } 925 926 /* 927 * entropy_enter_intr(buf, len, nbits) 928 * 929 * Enter up to len bytes of data from buf into the system's 930 * entropy pool without stirring. nbits is a lower bound on the 931 * number of bits of entropy in the process that led to this 932 * sample. If the sample could be entered completely, assume 933 * nbits of entropy pending; otherwise assume none, since we don't 934 * know whether some parts of the sample are constant, for 935 * instance. Schedule a softint to stir the entropy pool if 936 * needed. Return true if used fully, false if truncated at all. 937 * 938 * Using this in thread context will work, but you might as well 939 * use entropy_enter in that case. 940 */ 941 static bool 942 entropy_enter_intr(const void *buf, size_t len, unsigned nbits) 943 { 944 struct entropy_cpu *ec; 945 bool fullyused = false; 946 uint32_t pending; 947 void *sih; 948 949 KASSERT(cpu_intr_p()); 950 KASSERTMSG(howmany(nbits, NBBY) <= len, 951 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 952 953 /* If it's too early after boot, just use entropy_enter_early. */ 954 if (__predict_false(E->stage == ENTROPY_COLD)) { 955 entropy_enter_early(buf, len, nbits); 956 return true; 957 } 958 959 /* 960 * Acquire the per-CPU state. If someone is in the middle of 961 * using it, drop the sample. Otherwise, take the lock so that 962 * higher-priority interrupts will drop their samples. 963 */ 964 ec = percpu_getref(entropy_percpu); 965 if (ec->ec_locked) { 966 ec->ec_evcnt->intrdrop.ev_count++; 967 goto out0; 968 } 969 ec->ec_locked = true; 970 __insn_barrier(); 971 972 /* 973 * Enter as much as we can into the per-CPU pool. If it was 974 * truncated, schedule a softint to stir the pool and stop. 975 */ 976 if (!entpool_enter_nostir(ec->ec_pool, buf, len)) { 977 sih = atomic_load_relaxed(&entropy_sih); 978 if (__predict_true(sih != NULL)) 979 softint_schedule(sih); 980 ec->ec_evcnt->intrtrunc.ev_count++; 981 goto out1; 982 } 983 fullyused = true; 984 985 /* Count up what we can contribute. 
*/ 986 pending = ec->ec_pending; 987 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 988 atomic_store_relaxed(&ec->ec_pending, pending); 989 990 /* Schedule a softint if we added anything and it matters. */ 991 if (__predict_false((atomic_load_relaxed(&E->needed) != 0) || 992 atomic_load_relaxed(&entropy_depletion)) && 993 nbits != 0) { 994 sih = atomic_load_relaxed(&entropy_sih); 995 if (__predict_true(sih != NULL)) 996 softint_schedule(sih); 997 } 998 999 out1: /* Release the per-CPU state. */ 1000 KASSERT(ec->ec_locked); 1001 __insn_barrier(); 1002 ec->ec_locked = false; 1003 out0: percpu_putref(entropy_percpu); 1004 1005 return fullyused; 1006 } 1007 1008 /* 1009 * entropy_softintr(cookie) 1010 * 1011 * Soft interrupt handler for entering entropy. Takes care of 1012 * stirring the local CPU's entropy pool if it filled up during 1013 * hard interrupts, and promptly crediting entropy from the local 1014 * CPU's entropy pool to the global entropy pool if needed. 1015 */ 1016 static void 1017 entropy_softintr(void *cookie) 1018 { 1019 struct entropy_cpu_lock lock; 1020 struct entropy_cpu *ec; 1021 unsigned pending; 1022 1023 /* 1024 * With the per-CPU state locked, stir the pool if necessary 1025 * and determine if there's any pending entropy on this CPU to 1026 * account globally. 1027 */ 1028 ec = entropy_cpu_get(&lock); 1029 ec->ec_evcnt->softint.ev_count++; 1030 entpool_stir(ec->ec_pool); 1031 pending = ec->ec_pending; 1032 entropy_cpu_put(&lock, ec); 1033 1034 /* Consolidate globally if appropriate based on what we added. */ 1035 if (pending) 1036 entropy_account_cpu(ec); 1037 } 1038 1039 /* 1040 * entropy_thread(cookie) 1041 * 1042 * Handle any asynchronous entropy housekeeping. 1043 */ 1044 static void 1045 entropy_thread(void *cookie) 1046 { 1047 bool consolidate; 1048 1049 for (;;) { 1050 /* 1051 * Wait until there's full entropy somewhere among the 1052 * CPUs, as confirmed at most once per minute, or 1053 * someone wants to consolidate. 1054 */ 1055 if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) { 1056 consolidate = true; 1057 } else { 1058 mutex_enter(&E->lock); 1059 if (!E->consolidate) 1060 cv_timedwait(&E->cv, &E->lock, 60*hz); 1061 consolidate = E->consolidate; 1062 E->consolidate = false; 1063 mutex_exit(&E->lock); 1064 } 1065 1066 if (consolidate) { 1067 /* Do it. */ 1068 entropy_do_consolidate(); 1069 1070 /* Mitigate abuse. */ 1071 kpause("entropy", false, hz, NULL); 1072 } 1073 } 1074 } 1075 1076 /* 1077 * entropy_pending() 1078 * 1079 * Count up the amount of entropy pending on other CPUs. 1080 */ 1081 static uint32_t 1082 entropy_pending(void) 1083 { 1084 uint32_t pending = 0; 1085 1086 percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending); 1087 return pending; 1088 } 1089 1090 static void 1091 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci) 1092 { 1093 struct entropy_cpu *ec = ptr; 1094 uint32_t *pendingp = cookie; 1095 uint32_t cpu_pending; 1096 1097 cpu_pending = atomic_load_relaxed(&ec->ec_pending); 1098 *pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending); 1099 } 1100 1101 /* 1102 * entropy_do_consolidate() 1103 * 1104 * Issue a cross-call to gather entropy on all CPUs and advance 1105 * the entropy epoch. 
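/*
 * Invocation sketch (summarizing entropy_thread and entropy_pending
 * above; no new mechanism): the housekeeping thread calls this
 * either when the per-CPU pools collectively report full entropy or
 * when consolidation has been explicitly requested:
 *
 *	if (entropy_pending() >= ENTROPY_CAPACITY*NBBY)
 *		consolidate = true;
 *	...
 *	if (consolidate)
 *		entropy_do_consolidate();
 *
 * entropy_pending() saturates at ENTROPY_CAPACITY*NBBY, so `full
 * capacity or more' is reported as exactly the capacity.
 */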
1106 */ 1107 static void 1108 entropy_do_consolidate(void) 1109 { 1110 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1111 static struct timeval lasttime; /* serialized by E->lock */ 1112 struct entpool pool; 1113 uint8_t buf[ENTPOOL_CAPACITY]; 1114 unsigned diff; 1115 uint64_t ticket; 1116 1117 /* Gather entropy on all CPUs into a temporary pool. */ 1118 memset(&pool, 0, sizeof pool); 1119 ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL); 1120 xc_wait(ticket); 1121 1122 /* Acquire the lock to notify waiters. */ 1123 mutex_enter(&E->lock); 1124 1125 /* Count another consolidation. */ 1126 entropy_consolidate_evcnt.ev_count++; 1127 1128 /* Note when we last consolidated, i.e. now. */ 1129 E->timestamp = time_uptime; 1130 1131 /* Mix what we gathered into the global pool. */ 1132 entpool_extract(&pool, buf, sizeof buf); 1133 entpool_enter(&E->pool, buf, sizeof buf); 1134 explicit_memset(&pool, 0, sizeof pool); 1135 1136 /* Count the entropy that was gathered. */ 1137 diff = MIN(E->needed, E->pending); 1138 atomic_store_relaxed(&E->needed, E->needed - diff); 1139 E->pending -= diff; 1140 if (__predict_false(E->needed > 0)) { 1141 if ((boothowto & AB_DEBUG) != 0 && 1142 ratecheck(&lasttime, &interval)) { 1143 printf("WARNING:" 1144 " consolidating less than full entropy\n"); 1145 } 1146 } 1147 1148 /* Advance the epoch and notify waiters. */ 1149 entropy_notify(); 1150 1151 /* Release the lock. */ 1152 mutex_exit(&E->lock); 1153 } 1154 1155 /* 1156 * entropy_consolidate_xc(vpool, arg2) 1157 * 1158 * Extract output from the local CPU's input pool and enter it 1159 * into a temporary pool passed as vpool. 1160 */ 1161 static void 1162 entropy_consolidate_xc(void *vpool, void *arg2 __unused) 1163 { 1164 struct entpool *pool = vpool; 1165 struct entropy_cpu_lock lock; 1166 struct entropy_cpu *ec; 1167 uint8_t buf[ENTPOOL_CAPACITY]; 1168 uint32_t extra[7]; 1169 unsigned i = 0; 1170 1171 /* Grab CPU number and cycle counter to mix extra into the pool. */ 1172 extra[i++] = cpu_number(); 1173 extra[i++] = entropy_timer(); 1174 1175 /* 1176 * With the per-CPU state locked, extract from the per-CPU pool 1177 * and count it as no longer pending. 1178 */ 1179 ec = entropy_cpu_get(&lock); 1180 extra[i++] = entropy_timer(); 1181 entpool_extract(ec->ec_pool, buf, sizeof buf); 1182 atomic_store_relaxed(&ec->ec_pending, 0); 1183 extra[i++] = entropy_timer(); 1184 entropy_cpu_put(&lock, ec); 1185 extra[i++] = entropy_timer(); 1186 1187 /* 1188 * Copy over statistics, and enter the per-CPU extract and the 1189 * extra timing into the temporary pool, under the global lock. 1190 */ 1191 mutex_enter(&E->lock); 1192 extra[i++] = entropy_timer(); 1193 entpool_enter(pool, buf, sizeof buf); 1194 explicit_memset(buf, 0, sizeof buf); 1195 extra[i++] = entropy_timer(); 1196 KASSERT(i == __arraycount(extra)); 1197 entpool_enter(pool, extra, sizeof extra); 1198 explicit_memset(extra, 0, sizeof extra); 1199 mutex_exit(&E->lock); 1200 } 1201 1202 /* 1203 * entropy_notify() 1204 * 1205 * Caller just contributed entropy to the global pool. Advance 1206 * the entropy epoch and notify waiters. 1207 * 1208 * Caller must hold the global entropy lock. Except for the 1209 * `sysctl -w kern.entropy.consolidate=1` trigger, the caller must 1210 * have just have transitioned from partial entropy to full 1211 * entropy -- E->needed should be zero now. 
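/*
 * Aside (sketch of the rate-limiting idiom used in
 * entropy_do_consolidate above and again in the body below): a
 * static lasttime/interval pair plus ratecheck(9) caps an action at
 * once per interval, here once per minute:
 *
 *	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
 *	static struct timeval lasttime;
 *
 *	if (ratecheck(&lasttime, &interval))
 *		printf("happens at most once per minute\n");
 */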
1212 */ 1213 static void 1214 entropy_notify(void) 1215 { 1216 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1217 static struct timeval lasttime; /* serialized by E->lock */ 1218 unsigned epoch; 1219 1220 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1221 1222 /* 1223 * If this is the first time, print a message to the console 1224 * that we're ready so operators can compare it to the timing 1225 * of other events. 1226 */ 1227 if (__predict_false(E->epoch == (unsigned)-1) && E->needed == 0) 1228 printf("entropy: ready\n"); 1229 1230 /* Set the epoch; roll over from UINTMAX-1 to 1. */ 1231 if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) || 1232 ratecheck(&lasttime, &interval)) { 1233 epoch = E->epoch + 1; 1234 if (epoch == 0 || epoch == (unsigned)-1) 1235 epoch = 1; 1236 atomic_store_relaxed(&E->epoch, epoch); 1237 } 1238 KASSERT(E->epoch != (unsigned)-1); 1239 1240 /* Notify waiters. */ 1241 if (E->stage >= ENTROPY_WARM) { 1242 cv_broadcast(&E->cv); 1243 selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT); 1244 } 1245 1246 /* Count another notification. */ 1247 entropy_notify_evcnt.ev_count++; 1248 } 1249 1250 /* 1251 * entropy_consolidate() 1252 * 1253 * Trigger entropy consolidation and wait for it to complete. 1254 * 1255 * This should be used sparingly, not periodically -- requiring 1256 * conscious intervention by the operator or a clear policy 1257 * decision. Otherwise, the kernel will automatically consolidate 1258 * when enough entropy has been gathered into per-CPU pools to 1259 * transition to full entropy. 1260 */ 1261 void 1262 entropy_consolidate(void) 1263 { 1264 uint64_t ticket; 1265 int error; 1266 1267 KASSERT(E->stage == ENTROPY_HOT); 1268 1269 mutex_enter(&E->lock); 1270 ticket = entropy_consolidate_evcnt.ev_count; 1271 E->consolidate = true; 1272 cv_broadcast(&E->cv); 1273 while (ticket == entropy_consolidate_evcnt.ev_count) { 1274 error = cv_wait_sig(&E->cv, &E->lock); 1275 if (error) 1276 break; 1277 } 1278 mutex_exit(&E->lock); 1279 } 1280 1281 /* 1282 * sysctl -w kern.entropy.consolidate=1 1283 * 1284 * Trigger entropy consolidation and wait for it to complete. 1285 * Writable only by superuser. This, writing to /dev/random, and 1286 * ioctl(RNDADDDATA) are the only ways for the system to 1287 * consolidate entropy if the operator knows something the kernel 1288 * doesn't about how unpredictable the pending entropy pools are. 1289 */ 1290 static int 1291 sysctl_entropy_consolidate(SYSCTLFN_ARGS) 1292 { 1293 struct sysctlnode node = *rnode; 1294 int arg; 1295 int error; 1296 1297 KASSERT(E->stage == ENTROPY_HOT); 1298 1299 node.sysctl_data = &arg; 1300 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1301 if (error || newp == NULL) 1302 return error; 1303 if (arg) 1304 entropy_consolidate(); 1305 1306 return error; 1307 } 1308 1309 /* 1310 * sysctl -w kern.entropy.gather=1 1311 * 1312 * Trigger gathering entropy from all on-demand sources, and wait 1313 * for synchronous sources (but not asynchronous sources) to 1314 * complete. Writable only by superuser. 
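/*
 * Userland sketch (standard sysctl(3) interface; error handling
 * beyond err(3) omitted; must be run by the superuser).  This is
 * what `sysctl -w kern.entropy.gather=1' does under the hood, and
 * kern.entropy.consolidate is written the same way:
 *
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *
 *	int one = 1;
 *
 *	if (sysctlbyname("kern.entropy.gather", NULL, NULL,
 *	    &one, sizeof one) == -1)
 *		err(1, "sysctlbyname");
 */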
1315 */ 1316 static int 1317 sysctl_entropy_gather(SYSCTLFN_ARGS) 1318 { 1319 struct sysctlnode node = *rnode; 1320 int arg; 1321 int error; 1322 1323 KASSERT(E->stage == ENTROPY_HOT); 1324 1325 node.sysctl_data = &arg; 1326 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1327 if (error || newp == NULL) 1328 return error; 1329 if (arg) { 1330 mutex_enter(&E->lock); 1331 error = entropy_request(ENTROPY_CAPACITY, 1332 ENTROPY_WAIT|ENTROPY_SIG); 1333 mutex_exit(&E->lock); 1334 } 1335 1336 return 0; 1337 } 1338 1339 /* 1340 * entropy_extract(buf, len, flags) 1341 * 1342 * Extract len bytes from the global entropy pool into buf. 1343 * 1344 * Flags may have: 1345 * 1346 * ENTROPY_WAIT Wait for entropy if not available yet. 1347 * ENTROPY_SIG Allow interruption by a signal during wait. 1348 * ENTROPY_HARDFAIL Either fill the buffer with full entropy, 1349 * or fail without filling it at all. 1350 * 1351 * Return zero on success, or error on failure: 1352 * 1353 * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 1354 * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 1355 * 1356 * If ENTROPY_WAIT is set, allowed only in thread context. If 1357 * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's 1358 * awfully high... Do we really need it in hard interrupts? This 1359 * arises from use of cprng_strong(9).) 1360 */ 1361 int 1362 entropy_extract(void *buf, size_t len, int flags) 1363 { 1364 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1365 static struct timeval lasttime; /* serialized by E->lock */ 1366 int error; 1367 1368 if (ISSET(flags, ENTROPY_WAIT)) { 1369 ASSERT_SLEEPABLE(); 1370 KASSERTMSG(E->stage >= ENTROPY_WARM, 1371 "can't wait for entropy until warm"); 1372 } 1373 1374 /* Refuse to operate in interrupt context. */ 1375 KASSERT(!cpu_intr_p()); 1376 1377 /* Acquire the global lock to get at the global pool. */ 1378 if (E->stage >= ENTROPY_WARM) 1379 mutex_enter(&E->lock); 1380 1381 /* Wait until there is enough entropy in the system. */ 1382 error = 0; 1383 while (E->needed) { 1384 /* Ask for more, synchronously if possible. */ 1385 error = entropy_request(len, flags); 1386 if (error) 1387 break; 1388 1389 /* If we got enough, we're done. */ 1390 if (E->needed == 0) { 1391 KASSERT(error == 0); 1392 break; 1393 } 1394 1395 /* If not waiting, stop here. */ 1396 if (!ISSET(flags, ENTROPY_WAIT)) { 1397 error = EWOULDBLOCK; 1398 break; 1399 } 1400 1401 /* Wait for some entropy to come in and try again. */ 1402 KASSERT(E->stage >= ENTROPY_WARM); 1403 printf("entropy: pid %d (%s) blocking due to lack of entropy\n", 1404 curproc->p_pid, curproc->p_comm); 1405 1406 if (ISSET(flags, ENTROPY_SIG)) { 1407 error = cv_wait_sig(&E->cv, &E->lock); 1408 if (error) 1409 break; 1410 } else { 1411 cv_wait(&E->cv, &E->lock); 1412 } 1413 } 1414 1415 /* 1416 * Count failure -- but fill the buffer nevertheless, unless 1417 * the caller specified ENTROPY_HARDFAIL. 1418 */ 1419 if (error) { 1420 if (ISSET(flags, ENTROPY_HARDFAIL)) 1421 goto out; 1422 entropy_extract_fail_evcnt.ev_count++; 1423 } 1424 1425 /* 1426 * Report a warning if we have never yet reached full entropy. 1427 * This is the only case where we consider entropy to be 1428 * `depleted' without kern.entropy.depletion enabled -- when we 1429 * only have partial entropy, an adversary may be able to 1430 * narrow the state of the pool down to a small number of 1431 * possibilities; the output then enables them to confirm a 1432 * guess, reducing its entropy from the adversary's perspective 1433 * to zero. 
1434 */ 1435 if (__predict_false(E->epoch == (unsigned)-1)) { 1436 if (ratecheck(&lasttime, &interval)) 1437 printf("WARNING:" 1438 " system needs entropy for security;" 1439 " see entropy(7)\n"); 1440 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY); 1441 } 1442 1443 /* Extract data from the pool, and `deplete' if we're doing that. */ 1444 entpool_extract(&E->pool, buf, len); 1445 if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 1446 error == 0) { 1447 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 1448 1449 atomic_store_relaxed(&E->needed, 1450 E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost)); 1451 entropy_deplete_evcnt.ev_count++; 1452 } 1453 1454 out: /* Release the global lock and return the error. */ 1455 if (E->stage >= ENTROPY_WARM) 1456 mutex_exit(&E->lock); 1457 return error; 1458 } 1459 1460 /* 1461 * entropy_poll(events) 1462 * 1463 * Return the subset of events ready, and if it is not all of 1464 * events, record curlwp as waiting for entropy. 1465 */ 1466 int 1467 entropy_poll(int events) 1468 { 1469 int revents = 0; 1470 1471 KASSERT(E->stage >= ENTROPY_WARM); 1472 1473 /* Always ready for writing. */ 1474 revents |= events & (POLLOUT|POLLWRNORM); 1475 1476 /* Narrow it down to reads. */ 1477 events &= POLLIN|POLLRDNORM; 1478 if (events == 0) 1479 return revents; 1480 1481 /* 1482 * If we have reached full entropy and we're not depleting 1483 * entropy, we are forever ready. 1484 */ 1485 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 1486 __predict_true(!atomic_load_relaxed(&entropy_depletion))) 1487 return revents | events; 1488 1489 /* 1490 * Otherwise, check whether we need entropy under the lock. If 1491 * we don't, we're ready; if we do, add ourselves to the queue. 1492 */ 1493 mutex_enter(&E->lock); 1494 if (E->needed == 0) 1495 revents |= events; 1496 else 1497 selrecord(curlwp, &E->selq); 1498 mutex_exit(&E->lock); 1499 1500 return revents; 1501 } 1502 1503 /* 1504 * filt_entropy_read_detach(kn) 1505 * 1506 * struct filterops::f_detach callback for entropy read events: 1507 * remove kn from the list of waiters. 1508 */ 1509 static void 1510 filt_entropy_read_detach(struct knote *kn) 1511 { 1512 1513 KASSERT(E->stage >= ENTROPY_WARM); 1514 1515 mutex_enter(&E->lock); 1516 selremove_knote(&E->selq, kn); 1517 mutex_exit(&E->lock); 1518 } 1519 1520 /* 1521 * filt_entropy_read_event(kn, hint) 1522 * 1523 * struct filterops::f_event callback for entropy read events: 1524 * poll for entropy. Caller must hold the global entropy lock if 1525 * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 1526 */ 1527 static int 1528 filt_entropy_read_event(struct knote *kn, long hint) 1529 { 1530 int ret; 1531 1532 KASSERT(E->stage >= ENTROPY_WARM); 1533 1534 /* Acquire the lock, if caller is outside entropy subsystem. */ 1535 if (hint == NOTE_SUBMIT) 1536 KASSERT(mutex_owned(&E->lock)); 1537 else 1538 mutex_enter(&E->lock); 1539 1540 /* 1541 * If we still need entropy, can't read anything; if not, can 1542 * read arbitrarily much. 1543 */ 1544 if (E->needed != 0) { 1545 ret = 0; 1546 } else { 1547 if (atomic_load_relaxed(&entropy_depletion)) 1548 kn->kn_data = ENTROPY_CAPACITY*NBBY; 1549 else 1550 kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 1551 ret = 1; 1552 } 1553 1554 /* Release the lock, if caller is outside entropy subsystem. */ 1555 if (hint == NOTE_SUBMIT) 1556 KASSERT(mutex_owned(&E->lock)); 1557 else 1558 mutex_exit(&E->lock); 1559 1560 return ret; 1561 } 1562 1563 /* XXX Makes sense only for /dev/u?random. 
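/*
 * Userland sketch (assumes the usual /dev/random semantics, which
 * entropy_poll above and the filterops below back; error checks
 * omitted): wait until the system reports full entropy:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *
 *	struct pollfd pfd = {
 *		.fd = open("/dev/random", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *
 *	if (poll(&pfd, 1, INFTIM) == 1 && (pfd.revents & POLLIN) != 0)
 *		;	POLLIN here means the pool has full entropy
 */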
*/ 1564 static const struct filterops entropy_read_filtops = { 1565 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 1566 .f_attach = NULL, 1567 .f_detach = filt_entropy_read_detach, 1568 .f_event = filt_entropy_read_event, 1569 }; 1570 1571 /* 1572 * entropy_kqfilter(kn) 1573 * 1574 * Register kn to receive entropy event notifications. May be 1575 * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. 1576 */ 1577 int 1578 entropy_kqfilter(struct knote *kn) 1579 { 1580 1581 KASSERT(E->stage >= ENTROPY_WARM); 1582 1583 switch (kn->kn_filter) { 1584 case EVFILT_READ: 1585 /* Enter into the global select queue. */ 1586 mutex_enter(&E->lock); 1587 kn->kn_fop = &entropy_read_filtops; 1588 selrecord_knote(&E->selq, kn); 1589 mutex_exit(&E->lock); 1590 return 0; 1591 case EVFILT_WRITE: 1592 /* Can always dump entropy into the system. */ 1593 kn->kn_fop = &seltrue_filtops; 1594 return 0; 1595 default: 1596 return EINVAL; 1597 } 1598 } 1599 1600 /* 1601 * rndsource_setcb(rs, get, getarg) 1602 * 1603 * Set the request callback for the entropy source rs, if it can 1604 * provide entropy on demand. Must precede rnd_attach_source. 1605 */ 1606 void 1607 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *), 1608 void *getarg) 1609 { 1610 1611 rs->get = get; 1612 rs->getarg = getarg; 1613 } 1614 1615 /* 1616 * rnd_attach_source(rs, name, type, flags) 1617 * 1618 * Attach the entropy source rs. Must be done after 1619 * rndsource_setcb, if any, and before any calls to rnd_add_data. 1620 */ 1621 void 1622 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type, 1623 uint32_t flags) 1624 { 1625 uint32_t extra[4]; 1626 unsigned i = 0; 1627 1628 /* Grab cycle counter to mix extra into the pool. */ 1629 extra[i++] = entropy_timer(); 1630 1631 /* 1632 * Apply some standard flags: 1633 * 1634 * - We do not bother with network devices by default, for 1635 * hysterical raisins (perhaps: because it is often the case 1636 * that an adversary can influence network packet timings). 1637 */ 1638 switch (type) { 1639 case RND_TYPE_NET: 1640 flags |= RND_FLAG_NO_COLLECT; 1641 break; 1642 } 1643 1644 /* Sanity-check the callback if RND_FLAG_HASCB is set. */ 1645 KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL); 1646 1647 /* Initialize the random source. */ 1648 memset(rs->name, 0, sizeof(rs->name)); /* paranoia */ 1649 strlcpy(rs->name, name, sizeof(rs->name)); 1650 memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 1651 memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 1652 rs->total = 0; 1653 rs->type = type; 1654 rs->flags = flags; 1655 if (E->stage >= ENTROPY_WARM) 1656 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 1657 extra[i++] = entropy_timer(); 1658 1659 /* Wire it into the global list of random sources. */ 1660 if (E->stage >= ENTROPY_WARM) 1661 mutex_enter(&E->lock); 1662 LIST_INSERT_HEAD(&E->sources, rs, list); 1663 if (E->stage >= ENTROPY_WARM) 1664 mutex_exit(&E->lock); 1665 extra[i++] = entropy_timer(); 1666 1667 /* Request that it provide entropy ASAP, if we can. */ 1668 if (ISSET(flags, RND_FLAG_HASCB)) 1669 (*rs->get)(ENTROPY_CAPACITY, rs->getarg); 1670 extra[i++] = entropy_timer(); 1671 1672 /* Mix the extra into the pool. */ 1673 KASSERT(i == __arraycount(extra)); 1674 entropy_enter(extra, sizeof extra, 0); 1675 explicit_memset(extra, 0, sizeof extra); 1676 } 1677 1678 /* 1679 * rnd_detach_source(rs) 1680 * 1681 * Detach the entropy source rs. May sleep waiting for users to 1682 * drain. Further use is not allowed. 
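/*
 * Driver-side sketch (hypothetical xyz(4) device; the interfaces are
 * rndsource_setcb and rnd_attach_source above and rnd_detach_source
 * below): a source that can produce output on demand registers its
 * callback before attaching, passes RND_FLAG_HASCB, and detaches
 * when the driver goes away:
 *
 *	static void
 *	xyz_get(size_t nbytes, void *arg)
 *	{
 *		struct xyz_softc *sc = arg;
 *		uint32_t sample;
 *
 *		while (nbytes) {
 *			sample = xyz_read_rng(sc);	(hypothetical)
 *			rnd_add_data(&sc->sc_rndsource, &sample,
 *			    sizeof sample, NBBY*sizeof(sample)/2);
 *			nbytes -= MIN(nbytes, sizeof sample);
 *		}
 *	}
 *
 *	rndsource_setcb(&sc->sc_rndsource, xyz_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, "xyz", RND_TYPE_RNG,
 *	    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
 *	...
 *	rnd_detach_source(&sc->sc_rndsource);
 *
 * The entropy claim here (half the bits of each sample) is a made-up
 * figure for illustration; a real driver must justify its own bound.
 */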
1683 */ 1684 void 1685 rnd_detach_source(struct krndsource *rs) 1686 { 1687 1688 /* 1689 * If we're cold (shouldn't happen, but hey), just remove it 1690 * from the list -- there's nothing allocated. 1691 */ 1692 if (E->stage == ENTROPY_COLD) { 1693 LIST_REMOVE(rs, list); 1694 return; 1695 } 1696 1697 /* We may have to wait for entropy_request. */ 1698 ASSERT_SLEEPABLE(); 1699 1700 /* Wait until the source list is not in use, and remove it. */ 1701 mutex_enter(&E->lock); 1702 while (E->sourcelock) 1703 cv_wait(&E->sourcelock_cv, &E->lock); 1704 LIST_REMOVE(rs, list); 1705 mutex_exit(&E->lock); 1706 1707 /* Free the per-CPU data. */ 1708 percpu_free(rs->state, sizeof(struct rndsource_cpu)); 1709 } 1710 1711 /* 1712 * rnd_lock_sources(flags) 1713 * 1714 * Lock the list of entropy sources. Caller must hold the global 1715 * entropy lock. If successful, no rndsource will go away until 1716 * rnd_unlock_sources even while the caller releases the global 1717 * entropy lock. 1718 * 1719 * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 1720 * If flags & ENTROPY_SIG, allow interruption by signal. 1721 */ 1722 static int __attribute__((warn_unused_result)) 1723 rnd_lock_sources(int flags) 1724 { 1725 int error; 1726 1727 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1728 1729 while (E->sourcelock) { 1730 KASSERT(E->stage >= ENTROPY_WARM); 1731 if (!ISSET(flags, ENTROPY_WAIT)) 1732 return EWOULDBLOCK; 1733 if (ISSET(flags, ENTROPY_SIG)) { 1734 error = cv_wait_sig(&E->sourcelock_cv, &E->lock); 1735 if (error) 1736 return error; 1737 } else { 1738 cv_wait(&E->sourcelock_cv, &E->lock); 1739 } 1740 } 1741 1742 E->sourcelock = curlwp; 1743 return 0; 1744 } 1745 1746 /* 1747 * rnd_unlock_sources() 1748 * 1749 * Unlock the list of sources after rnd_lock_sources. Caller must 1750 * hold the global entropy lock. 1751 */ 1752 static void 1753 rnd_unlock_sources(void) 1754 { 1755 1756 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1757 1758 KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p", 1759 curlwp, E->sourcelock); 1760 E->sourcelock = NULL; 1761 if (E->stage >= ENTROPY_WARM) 1762 cv_signal(&E->sourcelock_cv); 1763 } 1764 1765 /* 1766 * rnd_sources_locked() 1767 * 1768 * True if we hold the list of rndsources locked, for diagnostic 1769 * assertions. 1770 */ 1771 static bool __diagused 1772 rnd_sources_locked(void) 1773 { 1774 1775 return E->sourcelock == curlwp; 1776 } 1777 1778 /* 1779 * entropy_request(nbytes, flags) 1780 * 1781 * Request nbytes bytes of entropy from all sources in the system. 1782 * OK if we overdo it. Caller must hold the global entropy lock; 1783 * will release and re-acquire it. 1784 * 1785 * If flags & ENTROPY_WAIT, wait for concurrent access to finish. 1786 * If flags & ENTROPY_SIG, allow interruption by signal. 1787 */ 1788 static int 1789 entropy_request(size_t nbytes, int flags) 1790 { 1791 struct krndsource *rs; 1792 int error; 1793 1794 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1795 if (flags & ENTROPY_WAIT) 1796 ASSERT_SLEEPABLE(); 1797 1798 /* 1799 * Lock the list of entropy sources to block rnd_detach_source 1800 * until we're done, and to serialize calls to the entropy 1801 * callbacks as guaranteed to drivers. 1802 */ 1803 error = rnd_lock_sources(flags); 1804 if (error) 1805 return error; 1806 entropy_request_evcnt.ev_count++; 1807 1808 /* Clamp to the maximum reasonable request. */ 1809 nbytes = MIN(nbytes, ENTROPY_CAPACITY); 1810 1811 /* Walk the list of sources. 
/*
 * entropy_request(nbytes, flags)
 *
 *	Request nbytes bytes of entropy from all sources in the system.
 *	OK if we overdo it.  Caller must hold the global entropy lock;
 *	will release and re-acquire it.
 *
 *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
 *	If flags & ENTROPY_SIG, allow interruption by signal.
 */
static int
entropy_request(size_t nbytes, int flags)
{
	struct krndsource *rs;
	int error;

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
	if (flags & ENTROPY_WAIT)
		ASSERT_SLEEPABLE();

	/*
	 * Lock the list of entropy sources to block rnd_detach_source
	 * until we're done, and to serialize calls to the entropy
	 * callbacks as guaranteed to drivers.
	 */
	error = rnd_lock_sources(flags);
	if (error)
		return error;
	entropy_request_evcnt.ev_count++;

	/* Clamp to the maximum reasonable request.  */
	nbytes = MIN(nbytes, ENTROPY_CAPACITY);

	/* Walk the list of sources.  */
	LIST_FOREACH(rs, &E->sources, list) {
		/* Skip sources without callbacks.  */
		if (!ISSET(rs->flags, RND_FLAG_HASCB))
			continue;

		/*
		 * Skip sources that are disabled altogether -- we
		 * would just ignore their samples anyway.
		 */
		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
			continue;

		/* Drop the lock while we call the callback.  */
		if (E->stage >= ENTROPY_WARM)
			mutex_exit(&E->lock);
		(*rs->get)(nbytes, rs->getarg);
		if (E->stage >= ENTROPY_WARM)
			mutex_enter(&E->lock);
	}

	/* Request done; unlock the list of entropy sources.  */
	rnd_unlock_sources();
	return 0;
}

/*
 * rnd_add_uint32(rs, value)
 *
 *	Enter 32 bits of data from an entropy source into the pool.
 *
 *	If rs is NULL, may not be called from interrupt context.
 *
 *	If rs is non-NULL, may be called from any context.  May drop
 *	data if called from interrupt context.
 */
void
rnd_add_uint32(struct krndsource *rs, uint32_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

void
_rnd_add_uint32(struct krndsource *rs, uint32_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

void
_rnd_add_uint64(struct krndsource *rs, uint64_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}
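
/*
 * Example (illustrative sketch only): a driver interrupt handler can
 * feed device data as zero-entropy samples via rnd_add_uint32; a
 * timestamp is added as well if the source collects timings, and
 * samples may be dropped under interrupt load rather than delaying
 * the handler.  The names mydev_softc, mydev_intr, and
 * mydev_ack_interrupt are invented for illustration.
 *
 *	static int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *		uint32_t status;
 *
 *		status = mydev_ack_interrupt(sc);
 *		rnd_add_uint32(&sc->sc_rndsource, status);
 *		return 1;
 *	}
 */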
/*
 * rnd_add_data(rs, buf, len, entropybits)
 *
 *	Enter data from an entropy source into the pool, with a
 *	driver's estimate of how much entropy the physical source of
 *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
 *	estimate and treat it as zero.
 *
 *	If rs is NULL, may not be called from interrupt context.
 *
 *	If rs is non-NULL, may be called from any context.  May drop
 *	data if called from interrupt context.
 */
void
rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{
	uint32_t extra;
	uint32_t flags;

	KASSERTMSG(howmany(entropybits, NBBY) <= len,
	    "%s: impossible entropy rate:"
	    " %"PRIu32" bits in %"PRIu32"-byte string",
	    rs ? rs->name : "(anonymous)", entropybits, len);

	/* If there's no rndsource, just enter the data and time now.  */
	if (rs == NULL) {
		entropy_enter(buf, len, entropybits);
		extra = entropy_timer();
		entropy_enter(&extra, sizeof extra, 0);
		explicit_memset(&extra, 0, sizeof extra);
		return;
	}

	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
	flags = atomic_load_relaxed(&rs->flags);

	/*
	 * Skip if:
	 * - we're not collecting entropy, or
	 * - the operator doesn't want to collect entropy from this, or
	 * - neither data nor timings are being collected from this.
	 */
	if (!atomic_load_relaxed(&entropy_collection) ||
	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
		return;

	/* If asked, ignore the estimate.  */
	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
		entropybits = 0;

	/* If we are collecting data, enter them.  */
	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
		rnd_add_data_1(rs, buf, len, entropybits,
		    RND_FLAG_COLLECT_VALUE);

	/* If we are collecting timings, enter one.  */
	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
		extra = entropy_timer();
		rnd_add_data_1(rs, &extra, sizeof extra, 0,
		    RND_FLAG_COLLECT_TIME);
	}
}

static unsigned
add_sat(unsigned a, unsigned b)
{
	unsigned c = a + b;

	return (c < a ? UINT_MAX : c);
}

/*
 * rnd_add_data_1(rs, buf, len, entropybits, flag)
 *
 *	Internal subroutine to call either entropy_enter_intr, if we're
 *	in interrupt context, or entropy_enter if not, and to count the
 *	entropy in an rndsource.
 */
static void
rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits, uint32_t flag)
{
	bool fullyused;

	/*
	 * If we're in interrupt context, use entropy_enter_intr and
	 * take note of whether it consumed the full sample; if not,
	 * use entropy_enter, which always consumes the full sample.
	 */
	if (curlwp && cpu_intr_p()) {
		fullyused = entropy_enter_intr(buf, len, entropybits);
	} else {
		entropy_enter(buf, len, entropybits);
		fullyused = true;
	}

	/*
	 * If we used the full sample, note how many bits were
	 * contributed from this source.
	 */
	if (fullyused) {
		if (__predict_false(E->stage == ENTROPY_COLD)) {
			rs->total = add_sat(rs->total, entropybits);
			switch (flag) {
			case RND_FLAG_COLLECT_TIME:
				rs->time_delta.insamples =
				    add_sat(rs->time_delta.insamples, 1);
				break;
			case RND_FLAG_COLLECT_VALUE:
				rs->value_delta.insamples =
				    add_sat(rs->value_delta.insamples, 1);
				break;
			}
		} else {
			struct rndsource_cpu *rc = percpu_getref(rs->state);

			atomic_store_relaxed(&rc->rc_entropybits,
			    add_sat(rc->rc_entropybits, entropybits));
			switch (flag) {
			case RND_FLAG_COLLECT_TIME:
				atomic_store_relaxed(&rc->rc_timesamples,
				    add_sat(rc->rc_timesamples, 1));
				break;
			case RND_FLAG_COLLECT_VALUE:
				atomic_store_relaxed(&rc->rc_datasamples,
				    add_sat(rc->rc_datasamples, 1));
				break;
			}
			percpu_putref(rs->state);
		}
	}
}

/*
 * rnd_add_data_sync(rs, buf, len, entropybits)
 *
 *	Same as rnd_add_data.  Originally used in rndsource callbacks,
 *	to break an unnecessary cycle; no longer really needed.
 */
void
rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{

	rnd_add_data(rs, buf, len, entropybits);
}

/*
 * rndsource_entropybits(rs)
 *
 *	Return approximately the number of bits of entropy that have
 *	been contributed via rs so far.  Approximate if other CPUs may
 *	be calling rnd_add_data concurrently.
 */
static unsigned
rndsource_entropybits(struct krndsource *rs)
{
	unsigned nbits = rs->total;

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());
	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
	return nbits;
}

static void
rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	unsigned *nbitsp = cookie;
	unsigned cpu_nbits;

	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
}
/*
 * rndsource_to_user(rs, urs)
 *
 *	Copy a description of rs out to urs for userland.
 */
static void
rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urs, 0, sizeof(*urs));

	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
	strlcpy(urs->name, rs->name, sizeof(urs->name));
	urs->total = rndsource_entropybits(rs);
	urs->type = rs->type;
	urs->flags = atomic_load_relaxed(&rs->flags);
}

/*
 * rndsource_to_user_est(rs, urse)
 *
 *	Copy a description of rs and estimation statistics out to urse
 *	for userland.
 */
static void
rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urse, 0, sizeof(*urse));

	/* Copy out the rndsource description.  */
	rndsource_to_user(rs, &urse->rt);

	/* Gather the statistics.  */
	urse->dt_samples = rs->time_delta.insamples;
	urse->dt_total = 0;
	urse->dv_samples = rs->value_delta.insamples;
	urse->dv_total = urse->rt.total;
	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
}

static void
rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	rndsource_est_t *urse = cookie;

	urse->dt_samples = add_sat(urse->dt_samples,
	    atomic_load_relaxed(&rc->rc_timesamples));
	urse->dv_samples = add_sat(urse->dv_samples,
	    atomic_load_relaxed(&rc->rc_datasamples));
}

/*
 * entropy_reset_xc(arg1, arg2)
 *
 *	Reset the current CPU's pending entropy to zero.
 */
static void
entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
{
	uint32_t extra = entropy_timer();
	struct entropy_cpu_lock lock;
	struct entropy_cpu *ec;

	/*
	 * With the per-CPU state locked, zero the pending count and
	 * enter a cycle count for fun.
	 */
	ec = entropy_cpu_get(&lock);
	ec->ec_pending = 0;
	entpool_enter(ec->ec_pool, &extra, sizeof extra);
	entropy_cpu_put(&lock, ec);
}
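
/*
 * Example (illustrative sketch only, userland code, not part of this
 * file): the ioctls dispatched below are normally driven through
 * /dev/random.  Querying the current entropy count, for instance,
 * looks roughly like this; error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/rndio.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t bits;
 *		int fd;
 *
 *		fd = open("/dev/random", O_RDONLY);
 *		ioctl(fd, RNDGETENTCNT, &bits);
 *		printf("%u bits of entropy\n", bits);
 *		return 0;
 *	}
 */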
/*
 * entropy_ioctl(cmd, data)
 *
 *	Handle various /dev/random ioctl queries.
 */
int
entropy_ioctl(unsigned long cmd, void *data)
{
	struct krndsource *rs;
	bool privileged = false;
	int error;

	KASSERT(E->stage >= ENTROPY_WARM);

	/* Verify user's authorization to perform the ioctl.  */
	switch (cmd) {
	case RNDGETENTCNT:
	case RNDGETPOOLSTAT:
	case RNDGETSRCNUM:
	case RNDGETSRCNAME:
	case RNDGETESTNUM:
	case RNDGETESTNAME:
		error = kauth_authorize_device(kauth_cred_get(),
		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDCTL:
		error = kauth_authorize_device(kauth_cred_get(),
		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDADDDATA:
		error = kauth_authorize_device(kauth_cred_get(),
		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
		/* Ascertain whether the user's inputs should be counted.  */
		if (kauth_authorize_device(kauth_cred_get(),
			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
			NULL, NULL, NULL, NULL) == 0)
			privileged = true;
		break;
	default: {
		/*
		 * XXX Hack to avoid changing module ABI so this can be
		 * pulled up.  Later, we can just remove the argument.
		 */
		static const struct fileops fops = {
			.fo_ioctl = rnd_system_ioctl,
		};
		struct file f = {
			.f_ops = &fops,
		};
		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
		    enosys(), error);
#if defined(_LP64)
		if (error == ENOSYS)
			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
			    enosys(), error);
#endif
		if (error == ENOSYS)
			error = ENOTTY;
		break;
	}
	}

	/* If anything went wrong with authorization, stop here.  */
	if (error)
		return error;

	/* Dispatch on the command.  */
	switch (cmd) {
	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
		uint32_t *countp = data;

		mutex_enter(&E->lock);
		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
		mutex_exit(&E->lock);

		break;
	}
	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
		rndpoolstat_t *pstat = data;

		mutex_enter(&E->lock);

		/* parameters */
		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
		pstat->threshold = ENTROPY_CAPACITY*1;	/* bytes */
		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */

		/* state */
		pstat->added = 0;	/* XXX total entropy_enter count */
		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
		pstat->removed = 0;	/* XXX total entropy_extract count */
		pstat->discarded = 0;	/* XXX bits of entropy beyond capacity */
		pstat->generated = 0;	/* XXX bits of data...fabricated? */

		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
		rndstat_t *stat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (stat->count == 0)
			break;
		if (stat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == stat->start)
				break;
		}
		while (i < stat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &stat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= stat->count);
		stat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
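	/*
	 * Example (illustrative sketch only, userland code): callers
	 * page through the source list by advancing start by the
	 * returned count until a call comes back with count == 0;
	 * fd is assumed to be an open /dev/random descriptor.
	 *
	 *	rndstat_t stat;
	 *	uint32_t start = 0;
	 *
	 *	for (;;) {
	 *		stat.start = start;
	 *		stat.count = RND_MAXSTATCOUNT;
	 *		if (ioctl(fd, RNDGETSRCNUM, &stat) == -1)
	 *			err(1, "RNDGETSRCNUM");
	 *		if (stat.count == 0)
	 *			break;
	 *		... use stat.source[0 .. stat.count - 1] ...
	 *		start += stat.count;
	 *	}
	 */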
	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
		rndstat_est_t *estat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (estat->count == 0)
			break;
		if (estat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == estat->start)
				break;
		}
		while (i < estat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &estat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= estat->count);
		estat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
		rndstat_name_t *nstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, nstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &nstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
		rndstat_est_name_t *enstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, enstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &enstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDCTL: {		/* Modify entropy source flags.  */
		rndctl_t *rndctl = data;
		const size_t n = sizeof(rs->name);
		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
		uint32_t flags;
		bool reset = false, request = false;

		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));

		/* Whitelist the flags that the user can change.  */
		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;

		/*
		 * For each matching rndsource, either by type if
		 * specified or by name if not, set the masked flags.
		 */
		mutex_enter(&E->lock);
		LIST_FOREACH(rs, &E->sources, list) {
			if (rndctl->type != 0xff) {
				if (rs->type != rndctl->type)
					continue;
			} else {
				if (strncmp(rs->name, rndctl->name, n) != 0)
					continue;
			}
			flags = rs->flags & ~rndctl->mask;
			flags |= rndctl->flags & rndctl->mask;
			if ((rs->flags & resetflags) == 0 &&
			    (flags & resetflags) != 0)
				reset = true;
			if ((rs->flags ^ flags) & resetflags)
				request = true;
			atomic_store_relaxed(&rs->flags, flags);
		}
		mutex_exit(&E->lock);

		/*
		 * If we disabled estimation or collection, nix all the
		 * pending entropy and set needed to the maximum.
		 */
		if (reset) {
			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
			mutex_enter(&E->lock);
			E->pending = 0;
			atomic_store_relaxed(&E->needed,
			    ENTROPY_CAPACITY*NBBY);
			mutex_exit(&E->lock);
		}

		/*
		 * If we changed any of the estimation or collection
		 * flags, request new samples from everyone -- either
		 * to make up for what we just lost, or to get new
		 * samples from what we just added.
		 *
		 * Failing on signal, while waiting for another process
		 * to finish requesting entropy, is OK here even though
		 * we have committed side effects, because this ioctl
		 * command is idempotent, so repeating it is safe.
		 */
		if (request) {
			mutex_enter(&E->lock);
			error = entropy_request(ENTROPY_CAPACITY,
			    ENTROPY_WAIT|ENTROPY_SIG);
			mutex_exit(&E->lock);
		}
		break;
	}
	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
		rnddata_t *rdata = data;
		unsigned entropybits = 0;

		if (!atomic_load_relaxed(&entropy_collection))
			break;	/* thanks but no thanks */
		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
			return EINVAL;

		/*
		 * This ioctl serves as the userland alternative to a
		 * bootloader-provided seed -- typically furnished by
		 * /etc/rc.d/random_seed.  We accept the user's entropy
		 * claim only if
		 *
		 * (a) the user is privileged, and
		 * (b) we have not entered a bootloader seed,
		 *
		 * under the assumption that the user may use this to
		 * load a seed from disk that we have already loaded
		 * from the bootloader, so we don't double-count it.
		 */
		if (privileged && rdata->entropy && rdata->len) {
			mutex_enter(&E->lock);
			if (!E->seeded) {
				entropybits = MIN(rdata->entropy,
				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
				E->seeded = true;
			}
			mutex_exit(&E->lock);
		}

		/* Enter the data and consolidate entropy.  */
		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
		    entropybits);
		entropy_consolidate();
		break;
	}
	default:
		error = ENOTTY;
	}

	/* Return any error that may have come up.  */
	return error;
}

/* Legacy entry points */

void
rnd_seed(void *seed, size_t len)
{

	if (len != sizeof(rndsave_t)) {
		printf("entropy: invalid seed length: %zu,"
		    " expected sizeof(rndsave_t) = %zu\n",
		    len, sizeof(rndsave_t));
		return;
	}
	entropy_seed(seed);
}

void
rnd_init(void)
{

	entropy_init();
}

void
rnd_init_softint(void)
{

	entropy_init_late();
	entropy_bootrequest();
}

int
rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
{

	return entropy_ioctl(cmd, data);
}
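
/*
 * Example (illustrative sketch only, userland code, not part of this
 * file): the RNDADDDATA ioctl handled above is how a seed saved on
 * disk is fed back in, in the style of /etc/rc.d/random_seed.  The
 * entropy claim is honored only for privileged callers and only if no
 * bootloader seed was entered.  seedfd is assumed to be an open
 * descriptor for the seed file; error handling, includes, and the
 * on-disk seed format are omitted.
 *
 *	rnddata_t rdata;
 *	ssize_t n;
 *	int fd;
 *
 *	memset(&rdata, 0, sizeof rdata);
 *	n = read(seedfd, rdata.data, sizeof rdata.data);
 *	rdata.len = n;
 *	rdata.entropy = NBBY*n;
 *	fd = open("/dev/random", O_RDWR);
 *	ioctl(fd, RNDADDDATA, &rdata);
 */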