/*	$NetBSD: kern_entropy.c,v 1.23 2020/08/14 00:53:16 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* No entropy estimation based on the sample values, which is a
 *	  contradiction in terms and a potential source of side
 *	  channels.  It is the responsibility of the driver author to
 *	  study how predictable the physical source of input can ever
 *	  be, and to furnish a lower bound on the amount of entropy it
 *	  has.
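 *
 *	* Illustrative driver usage (a sketch only; the mydev_* names
 *	  and mydev_read_noise are hypothetical).  A driver registers
 *	  an on-demand callback, attaches its source, and feeds
 *	  samples with a conservative entropy estimate drawn from
 *	  studying the device, not from the sample values; the
 *	  callback may deliver less than the requested nbytes:
 *
 *		static struct krndsource mydev_rndsource;
 *
 *		static void
 *		mydev_get(size_t nbytes, void *arg)
 *		{
 *			struct mydev_softc *sc = arg;
 *			uint32_t sample = mydev_read_noise(sc);
 *
 *			rnd_add_data(&mydev_rndsource, &sample,
 *			    sizeof sample, 1);
 *		}
 *
 *		void
 *		mydev_attach(struct mydev_softc *sc)
 *		{
 *			rndsource_setcb(&mydev_rndsource, &mydev_get, sc);
 *			rnd_attach_source(&mydev_rndsource, "mydev",
 *			    RND_TYPE_UNKNOWN,
 *			    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
 *		}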
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.23 2020/08/14 00:53:16 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct evcnt	*ec_softint_evcnt;
	struct entpool	*ec_pool;
	unsigned	ec_pending;
	bool		ec_locked;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned	rc_nbits;	/* bits of entropy added */
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	needed;		/* (A) needed globally */
	unsigned	pending;	/* (A) pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
	enum entropy_stage {
		ENTROPY_COLD = 0,	/* single-threaded */
		ENTROPY_WARM,		/* multi-threaded at boot before CPUs */
		ENTROPY_HOT,		/* multi-threaded multi-CPU */
	}		stage;
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.
*/ 174 .needed = ENTROPY_CAPACITY*NBBY, 175 .epoch = (unsigned)-1, /* -1 means entropy never consolidated */ 176 .sources = LIST_HEAD_INITIALIZER(entropy_global.sources), 177 .stage = ENTROPY_COLD, 178 }; 179 180 #define E (&entropy_global) /* declutter */ 181 182 /* Read-mostly globals */ 183 static struct percpu *entropy_percpu __read_mostly; /* struct entropy_cpu */ 184 static void *entropy_sih __read_mostly; /* softint handler */ 185 static struct lwp *entropy_lwp __read_mostly; /* housekeeping thread */ 186 187 int rnd_initial_entropy __read_mostly; /* XXX legacy */ 188 189 static struct krndsource seed_rndsource __read_mostly; 190 191 /* 192 * Event counters 193 * 194 * Must be careful with adding these because they can serve as 195 * side channels. 196 */ 197 static struct evcnt entropy_discretionary_evcnt = 198 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary"); 199 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt); 200 static struct evcnt entropy_immediate_evcnt = 201 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate"); 202 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt); 203 static struct evcnt entropy_partial_evcnt = 204 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial"); 205 EVCNT_ATTACH_STATIC(entropy_partial_evcnt); 206 static struct evcnt entropy_consolidate_evcnt = 207 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate"); 208 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt); 209 static struct evcnt entropy_extract_intr_evcnt = 210 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr"); 211 EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt); 212 static struct evcnt entropy_extract_fail_evcnt = 213 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail"); 214 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt); 215 static struct evcnt entropy_request_evcnt = 216 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request"); 217 EVCNT_ATTACH_STATIC(entropy_request_evcnt); 218 static struct evcnt entropy_deplete_evcnt = 219 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete"); 220 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt); 221 static struct evcnt entropy_notify_evcnt = 222 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify"); 223 EVCNT_ATTACH_STATIC(entropy_notify_evcnt); 224 225 /* Sysctl knobs */ 226 static bool entropy_collection = 1; 227 static bool entropy_depletion = 0; /* Silly! 
*/ 228 229 static const struct sysctlnode *entropy_sysctlroot; 230 static struct sysctllog *entropy_sysctllog; 231 232 /* Forward declarations */ 233 static void entropy_init_cpu(void *, void *, struct cpu_info *); 234 static void entropy_fini_cpu(void *, void *, struct cpu_info *); 235 static void entropy_account_cpu(struct entropy_cpu *); 236 static void entropy_enter(const void *, size_t, unsigned); 237 static bool entropy_enter_intr(const void *, size_t, unsigned); 238 static void entropy_softintr(void *); 239 static void entropy_thread(void *); 240 static uint32_t entropy_pending(void); 241 static void entropy_pending_cpu(void *, void *, struct cpu_info *); 242 static void entropy_do_consolidate(void); 243 static void entropy_consolidate_xc(void *, void *); 244 static void entropy_notify(void); 245 static int sysctl_entropy_consolidate(SYSCTLFN_ARGS); 246 static int sysctl_entropy_gather(SYSCTLFN_ARGS); 247 static void filt_entropy_read_detach(struct knote *); 248 static int filt_entropy_read_event(struct knote *, long); 249 static void entropy_request(size_t); 250 static void rnd_add_data_1(struct krndsource *, const void *, uint32_t, 251 uint32_t); 252 static unsigned rndsource_entropybits(struct krndsource *); 253 static void rndsource_entropybits_cpu(void *, void *, struct cpu_info *); 254 static void rndsource_to_user(struct krndsource *, rndsource_t *); 255 static void rndsource_to_user_est(struct krndsource *, rndsource_est_t *); 256 257 /* 258 * entropy_timer() 259 * 260 * Cycle counter, time counter, or anything that changes a wee bit 261 * unpredictably. 262 */ 263 static inline uint32_t 264 entropy_timer(void) 265 { 266 struct bintime bt; 267 uint32_t v; 268 269 /* If we have a CPU cycle counter, use the low 32 bits. */ 270 #ifdef __HAVE_CPU_COUNTER 271 if (__predict_true(cpu_hascounter())) 272 return cpu_counter32(); 273 #endif /* __HAVE_CPU_COUNTER */ 274 275 /* If we're cold, tough. Can't binuptime while cold. */ 276 if (__predict_false(cold)) 277 return 0; 278 279 /* Fold the 128 bits of binuptime into 32 bits. */ 280 binuptime(&bt); 281 v = bt.frac; 282 v ^= bt.frac >> 32; 283 v ^= bt.sec; 284 v ^= bt.sec >> 32; 285 return v; 286 } 287 288 static void 289 attach_seed_rndsource(void) 290 { 291 292 /* 293 * First called no later than entropy_init, while we are still 294 * single-threaded, so no need for RUN_ONCE. 295 */ 296 if (E->stage >= ENTROPY_WARM || E->seed_rndsource) 297 return; 298 rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN, 299 RND_FLAG_COLLECT_VALUE); 300 E->seed_rndsource = true; 301 } 302 303 /* 304 * entropy_init() 305 * 306 * Initialize the entropy subsystem. Panic on failure. 307 * 308 * Requires percpu(9) and sysctl(9) to be initialized. 309 */ 310 static void 311 entropy_init(void) 312 { 313 uint32_t extra[2]; 314 struct krndsource *rs; 315 unsigned i = 0; 316 317 KASSERT(E->stage == ENTROPY_COLD); 318 319 /* Grab some cycle counts early at boot. */ 320 extra[i++] = entropy_timer(); 321 322 /* Run the entropy pool cryptography self-test. */ 323 if (entpool_selftest() == -1) 324 panic("entropy pool crypto self-test failed"); 325 326 /* Create the sysctl directory. */ 327 sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot, 328 CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy", 329 SYSCTL_DESCR("Entropy (random number sources) options"), 330 NULL, 0, NULL, 0, 331 CTL_KERN, CTL_CREATE, CTL_EOL); 332 333 /* Create the sysctl knobs. */ 334 /* XXX These shouldn't be writable at securelevel>0. 
*/ 335 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 336 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection", 337 SYSCTL_DESCR("Automatically collect entropy from hardware"), 338 NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL); 339 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 340 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion", 341 SYSCTL_DESCR("`Deplete' entropy pool when observed"), 342 NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL); 343 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 344 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate", 345 SYSCTL_DESCR("Trigger entropy consolidation now"), 346 sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL); 347 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 348 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather", 349 SYSCTL_DESCR("Trigger entropy gathering from sources now"), 350 sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL); 351 /* XXX These should maybe not be readable at securelevel>0. */ 352 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 353 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 354 "needed", SYSCTL_DESCR("Systemwide entropy deficit"), 355 NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL); 356 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 357 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 358 "pending", SYSCTL_DESCR("Entropy pending on CPUs"), 359 NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL); 360 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 361 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 362 "epoch", SYSCTL_DESCR("Entropy epoch"), 363 NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL); 364 365 /* Initialize the global state for multithreaded operation. */ 366 mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM); 367 cv_init(&E->cv, "entropy"); 368 selinit(&E->selq); 369 370 /* Make sure the seed source is attached. */ 371 attach_seed_rndsource(); 372 373 /* Note if the bootloader didn't provide a seed. */ 374 if (!E->seeded) 375 printf("entropy: no seed from bootloader\n"); 376 377 /* Allocate the per-CPU records for all early entropy sources. */ 378 LIST_FOREACH(rs, &E->sources, list) 379 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 380 381 /* Enter the boot cycle count to get started. */ 382 extra[i++] = entropy_timer(); 383 KASSERT(i == __arraycount(extra)); 384 entropy_enter(extra, sizeof extra, 0); 385 explicit_memset(extra, 0, sizeof extra); 386 387 /* We are now ready for multi-threaded operation. */ 388 E->stage = ENTROPY_WARM; 389 } 390 391 /* 392 * entropy_init_late() 393 * 394 * Late initialization. Panic on failure. 395 * 396 * Requires CPUs to have been detected and LWPs to have started. 397 */ 398 static void 399 entropy_init_late(void) 400 { 401 int error; 402 403 KASSERT(E->stage == ENTROPY_WARM); 404 405 /* Allocate and initialize the per-CPU state. */ 406 entropy_percpu = percpu_create(sizeof(struct entropy_cpu), 407 entropy_init_cpu, entropy_fini_cpu, NULL); 408 409 /* 410 * Establish the softint at the highest softint priority level. 411 * Must happen after CPU detection. 412 */ 413 entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, 414 &entropy_softintr, NULL); 415 if (entropy_sih == NULL) 416 panic("unable to establish entropy softint"); 417 418 /* 419 * Create the entropy housekeeping thread. Must happen after 420 * lwpinit. 
421 */ 422 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL, 423 entropy_thread, NULL, &entropy_lwp, "entbutler"); 424 if (error) 425 panic("unable to create entropy housekeeping thread: %d", 426 error); 427 428 /* 429 * Wait until the per-CPU initialization has hit all CPUs 430 * before proceeding to mark the entropy system hot. 431 */ 432 xc_barrier(XC_HIGHPRI); 433 E->stage = ENTROPY_HOT; 434 } 435 436 /* 437 * entropy_init_cpu(ptr, cookie, ci) 438 * 439 * percpu(9) constructor for per-CPU entropy pool. 440 */ 441 static void 442 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci) 443 { 444 struct entropy_cpu *ec = ptr; 445 446 ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt), 447 KM_SLEEP); 448 ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP); 449 ec->ec_pending = 0; 450 ec->ec_locked = false; 451 452 evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL, 453 ci->ci_cpuname, "entropy softint"); 454 } 455 456 /* 457 * entropy_fini_cpu(ptr, cookie, ci) 458 * 459 * percpu(9) destructor for per-CPU entropy pool. 460 */ 461 static void 462 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci) 463 { 464 struct entropy_cpu *ec = ptr; 465 466 /* 467 * Zero any lingering data. Disclosure of the per-CPU pool 468 * shouldn't retroactively affect the security of any keys 469 * generated, because entpool(9) erases whatever we have just 470 * drawn out of any pool, but better safe than sorry. 471 */ 472 explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool)); 473 474 evcnt_detach(ec->ec_softint_evcnt); 475 476 kmem_free(ec->ec_pool, sizeof(*ec->ec_pool)); 477 kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt)); 478 } 479 480 /* 481 * entropy_seed(seed) 482 * 483 * Seed the entropy pool with seed. Meant to be called as early 484 * as possible by the bootloader; may be called before or after 485 * entropy_init. Must be called before system reaches userland. 486 * Must be called in thread or soft interrupt context, not in hard 487 * interrupt context. Must be called at most once. 488 * 489 * Overwrites the seed in place. Caller may then free the memory. 490 */ 491 static void 492 entropy_seed(rndsave_t *seed) 493 { 494 SHA1_CTX ctx; 495 uint8_t digest[SHA1_DIGEST_LENGTH]; 496 bool seeded; 497 498 /* 499 * Verify the checksum. If the checksum fails, take the data 500 * but ignore the entropy estimate -- the file may have been 501 * incompletely written with garbage, which is harmless to add 502 * but may not be as unpredictable as alleged. 503 */ 504 SHA1Init(&ctx); 505 SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy)); 506 SHA1Update(&ctx, seed->data, sizeof(seed->data)); 507 SHA1Final(digest, &ctx); 508 CTASSERT(sizeof(seed->digest) == sizeof(digest)); 509 if (!consttime_memequal(digest, seed->digest, sizeof(digest))) { 510 printf("entropy: invalid seed checksum\n"); 511 seed->entropy = 0; 512 } 513 explicit_memset(&ctx, 0, sizeof ctx); 514 explicit_memset(digest, 0, sizeof digest); 515 516 /* 517 * If the entropy is insensibly large, try byte-swapping. 518 * Otherwise assume the file is corrupted and act as though it 519 * has zero entropy. 520 */ 521 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) { 522 seed->entropy = bswap32(seed->entropy); 523 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) 524 seed->entropy = 0; 525 } 526 527 /* Make sure the seed source is attached. */ 528 attach_seed_rndsource(); 529 530 /* Test and set E->seeded. 
*/ 531 if (E->stage >= ENTROPY_WARM) 532 mutex_enter(&E->lock); 533 seeded = E->seeded; 534 E->seeded = (seed->entropy > 0); 535 if (E->stage >= ENTROPY_WARM) 536 mutex_exit(&E->lock); 537 538 /* 539 * If we've been seeded, may be re-entering the same seed 540 * (e.g., bootloader vs module init, or something). No harm in 541 * entering it twice, but it contributes no additional entropy. 542 */ 543 if (seeded) { 544 printf("entropy: double-seeded by bootloader\n"); 545 seed->entropy = 0; 546 } else { 547 printf("entropy: entering seed from bootloader" 548 " with %u bits of entropy\n", (unsigned)seed->entropy); 549 } 550 551 /* Enter it into the pool and promptly zero it. */ 552 rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data), 553 seed->entropy); 554 explicit_memset(seed, 0, sizeof(*seed)); 555 } 556 557 /* 558 * entropy_bootrequest() 559 * 560 * Request entropy from all sources at boot, once config is 561 * complete and interrupts are running. 562 */ 563 void 564 entropy_bootrequest(void) 565 { 566 567 KASSERT(E->stage >= ENTROPY_WARM); 568 569 /* 570 * Request enough to satisfy the maximum entropy shortage. 571 * This is harmless overkill if the bootloader provided a seed. 572 */ 573 mutex_enter(&E->lock); 574 entropy_request(ENTROPY_CAPACITY); 575 mutex_exit(&E->lock); 576 } 577 578 /* 579 * entropy_epoch() 580 * 581 * Returns the current entropy epoch. If this changes, you should 582 * reseed. If -1, means system entropy has not yet reached full 583 * entropy or been explicitly consolidated; never reverts back to 584 * -1. Never zero, so you can always use zero as an uninitialized 585 * sentinel value meaning `reseed ASAP'. 586 * 587 * Usage model: 588 * 589 * struct foo { 590 * struct crypto_prng prng; 591 * unsigned epoch; 592 * } *foo; 593 * 594 * unsigned epoch = entropy_epoch(); 595 * if (__predict_false(epoch != foo->epoch)) { 596 * uint8_t seed[32]; 597 * if (entropy_extract(seed, sizeof seed, 0) != 0) 598 * warn("no entropy"); 599 * crypto_prng_reseed(&foo->prng, seed, sizeof seed); 600 * foo->epoch = epoch; 601 * } 602 */ 603 unsigned 604 entropy_epoch(void) 605 { 606 607 /* 608 * Unsigned int, so no need for seqlock for an atomic read, but 609 * make sure we read it afresh each time. 610 */ 611 return atomic_load_relaxed(&E->epoch); 612 } 613 614 /* 615 * entropy_ready() 616 * 617 * True if the entropy pool has full entropy. 618 */ 619 bool 620 entropy_ready(void) 621 { 622 623 return atomic_load_relaxed(&E->needed) == 0; 624 } 625 626 /* 627 * entropy_account_cpu(ec) 628 * 629 * Consider whether to consolidate entropy into the global pool 630 * after we just added some into the current CPU's pending pool. 631 * 632 * - If this CPU can provide enough entropy now, do so. 633 * 634 * - If this and whatever else is available on other CPUs can 635 * provide enough entropy, kick the consolidation thread. 636 * 637 * - Otherwise, do as little as possible, except maybe consolidate 638 * entropy at most once a minute. 639 * 640 * Caller must be bound to a CPU and therefore have exclusive 641 * access to ec. Will acquire and release the global lock. 642 */ 643 static void 644 entropy_account_cpu(struct entropy_cpu *ec) 645 { 646 unsigned diff; 647 648 KASSERT(E->stage == ENTROPY_HOT); 649 650 /* 651 * If there's no entropy needed, and entropy has been 652 * consolidated in the last minute, do nothing. 
653 */ 654 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 655 __predict_true(!atomic_load_relaxed(&entropy_depletion)) && 656 __predict_true((time_uptime - E->timestamp) <= 60)) 657 return; 658 659 /* If there's nothing pending, stop here. */ 660 if (ec->ec_pending == 0) 661 return; 662 663 /* Consider consolidation, under the lock. */ 664 mutex_enter(&E->lock); 665 if (E->needed != 0 && E->needed <= ec->ec_pending) { 666 /* 667 * If we have not yet attained full entropy but we can 668 * now, do so. This way we disseminate entropy 669 * promptly when it becomes available early at boot; 670 * otherwise we leave it to the entropy consolidation 671 * thread, which is rate-limited to mitigate side 672 * channels and abuse. 673 */ 674 uint8_t buf[ENTPOOL_CAPACITY]; 675 676 /* Transfer from the local pool to the global pool. */ 677 entpool_extract(ec->ec_pool, buf, sizeof buf); 678 entpool_enter(&E->pool, buf, sizeof buf); 679 atomic_store_relaxed(&ec->ec_pending, 0); 680 atomic_store_relaxed(&E->needed, 0); 681 682 /* Notify waiters that we now have full entropy. */ 683 entropy_notify(); 684 entropy_immediate_evcnt.ev_count++; 685 } else { 686 /* Record how much we can add to the global pool. */ 687 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending); 688 E->pending += diff; 689 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff); 690 691 /* 692 * This should have made a difference unless we were 693 * already saturated. 694 */ 695 KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY); 696 KASSERT(E->pending); 697 698 if (E->needed <= E->pending) { 699 /* 700 * Enough entropy between all the per-CPU 701 * pools. Wake up the housekeeping thread. 702 * 703 * If we don't need any entropy, this doesn't 704 * mean much, but it is the only time we ever 705 * gather additional entropy in case the 706 * accounting has been overly optimistic. This 707 * happens at most once a minute, so there's 708 * negligible performance cost. 709 */ 710 E->consolidate = true; 711 cv_broadcast(&E->cv); 712 if (E->needed == 0) 713 entropy_discretionary_evcnt.ev_count++; 714 } else { 715 /* Can't get full entropy. Keep gathering. */ 716 entropy_partial_evcnt.ev_count++; 717 } 718 } 719 mutex_exit(&E->lock); 720 } 721 722 /* 723 * entropy_enter_early(buf, len, nbits) 724 * 725 * Do entropy bookkeeping globally, before we have established 726 * per-CPU pools. Enter directly into the global pool in the hope 727 * that we enter enough before the first entropy_extract to thwart 728 * iterative-guessing attacks; entropy_extract will warn if not. 729 */ 730 static void 731 entropy_enter_early(const void *buf, size_t len, unsigned nbits) 732 { 733 bool notify = false; 734 735 if (E->stage >= ENTROPY_WARM) 736 mutex_enter(&E->lock); 737 738 /* Enter it into the pool. */ 739 entpool_enter(&E->pool, buf, len); 740 741 /* 742 * Decide whether to notify reseed -- we will do so if either: 743 * (a) we transition from partial entropy to full entropy, or 744 * (b) we get a batch of full entropy all at once. 745 */ 746 notify |= (E->needed && E->needed <= nbits); 747 notify |= (nbits >= ENTROPY_CAPACITY*NBBY); 748 749 /* Subtract from the needed count and notify if appropriate. 
*/ 750 E->needed -= MIN(E->needed, nbits); 751 if (notify) { 752 entropy_notify(); 753 entropy_immediate_evcnt.ev_count++; 754 } 755 756 if (E->stage >= ENTROPY_WARM) 757 mutex_exit(&E->lock); 758 } 759 760 /* 761 * entropy_enter(buf, len, nbits) 762 * 763 * Enter len bytes of data from buf into the system's entropy 764 * pool, stirring as necessary when the internal buffer fills up. 765 * nbits is a lower bound on the number of bits of entropy in the 766 * process that led to this sample. 767 */ 768 static void 769 entropy_enter(const void *buf, size_t len, unsigned nbits) 770 { 771 struct entropy_cpu *ec; 772 uint32_t pending; 773 int s; 774 775 KASSERTMSG(!cpu_intr_p(), 776 "use entropy_enter_intr from interrupt context"); 777 KASSERTMSG(howmany(nbits, NBBY) <= len, 778 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 779 780 /* If it's too early after boot, just use entropy_enter_early. */ 781 if (__predict_false(E->stage < ENTROPY_HOT)) { 782 entropy_enter_early(buf, len, nbits); 783 return; 784 } 785 786 /* 787 * Acquire the per-CPU state, blocking soft interrupts and 788 * causing hard interrupts to drop samples on the floor. 789 */ 790 ec = percpu_getref(entropy_percpu); 791 s = splsoftserial(); 792 KASSERT(!ec->ec_locked); 793 ec->ec_locked = true; 794 __insn_barrier(); 795 796 /* Enter into the per-CPU pool. */ 797 entpool_enter(ec->ec_pool, buf, len); 798 799 /* Count up what we can add. */ 800 pending = ec->ec_pending; 801 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 802 atomic_store_relaxed(&ec->ec_pending, pending); 803 804 /* Consolidate globally if appropriate based on what we added. */ 805 entropy_account_cpu(ec); 806 807 /* Release the per-CPU state. */ 808 KASSERT(ec->ec_locked); 809 __insn_barrier(); 810 ec->ec_locked = false; 811 splx(s); 812 percpu_putref(entropy_percpu); 813 } 814 815 /* 816 * entropy_enter_intr(buf, len, nbits) 817 * 818 * Enter up to len bytes of data from buf into the system's 819 * entropy pool without stirring. nbits is a lower bound on the 820 * number of bits of entropy in the process that led to this 821 * sample. If the sample could be entered completely, assume 822 * nbits of entropy pending; otherwise assume none, since we don't 823 * know whether some parts of the sample are constant, for 824 * instance. Schedule a softint to stir the entropy pool if 825 * needed. Return true if used fully, false if truncated at all. 826 * 827 * Using this in thread context will work, but you might as well 828 * use entropy_enter in that case. 829 */ 830 static bool 831 entropy_enter_intr(const void *buf, size_t len, unsigned nbits) 832 { 833 struct entropy_cpu *ec; 834 bool fullyused = false; 835 uint32_t pending; 836 837 KASSERTMSG(howmany(nbits, NBBY) <= len, 838 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 839 840 /* If it's too early after boot, just use entropy_enter_early. */ 841 if (__predict_false(E->stage < ENTROPY_HOT)) { 842 entropy_enter_early(buf, len, nbits); 843 return true; 844 } 845 846 /* 847 * Acquire the per-CPU state. If someone is in the middle of 848 * using it, drop the sample. Otherwise, take the lock so that 849 * higher-priority interrupts will drop their samples. 850 */ 851 ec = percpu_getref(entropy_percpu); 852 if (ec->ec_locked) 853 goto out0; 854 ec->ec_locked = true; 855 __insn_barrier(); 856 857 /* 858 * Enter as much as we can into the per-CPU pool. If it was 859 * truncated, schedule a softint to stir the pool and stop. 
860 */ 861 if (!entpool_enter_nostir(ec->ec_pool, buf, len)) { 862 softint_schedule(entropy_sih); 863 goto out1; 864 } 865 fullyused = true; 866 867 /* Count up what we can contribute. */ 868 pending = ec->ec_pending; 869 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 870 atomic_store_relaxed(&ec->ec_pending, pending); 871 872 /* Schedule a softint if we added anything and it matters. */ 873 if (__predict_false((atomic_load_relaxed(&E->needed) != 0) || 874 atomic_load_relaxed(&entropy_depletion)) && 875 nbits != 0) 876 softint_schedule(entropy_sih); 877 878 out1: /* Release the per-CPU state. */ 879 KASSERT(ec->ec_locked); 880 __insn_barrier(); 881 ec->ec_locked = false; 882 out0: percpu_putref(entropy_percpu); 883 884 return fullyused; 885 } 886 887 /* 888 * entropy_softintr(cookie) 889 * 890 * Soft interrupt handler for entering entropy. Takes care of 891 * stirring the local CPU's entropy pool if it filled up during 892 * hard interrupts, and promptly crediting entropy from the local 893 * CPU's entropy pool to the global entropy pool if needed. 894 */ 895 static void 896 entropy_softintr(void *cookie) 897 { 898 struct entropy_cpu *ec; 899 900 /* 901 * Acquire the per-CPU state. Other users can lock this only 902 * while soft interrupts are blocked. Cause hard interrupts to 903 * drop samples on the floor. 904 */ 905 ec = percpu_getref(entropy_percpu); 906 KASSERT(!ec->ec_locked); 907 ec->ec_locked = true; 908 __insn_barrier(); 909 910 /* Count statistics. */ 911 ec->ec_softint_evcnt->ev_count++; 912 913 /* Stir the pool if necessary. */ 914 entpool_stir(ec->ec_pool); 915 916 /* Consolidate globally if appropriate based on what we added. */ 917 entropy_account_cpu(ec); 918 919 /* Release the per-CPU state. */ 920 KASSERT(ec->ec_locked); 921 __insn_barrier(); 922 ec->ec_locked = false; 923 percpu_putref(entropy_percpu); 924 } 925 926 /* 927 * entropy_thread(cookie) 928 * 929 * Handle any asynchronous entropy housekeeping. 930 */ 931 static void 932 entropy_thread(void *cookie) 933 { 934 bool consolidate; 935 936 for (;;) { 937 /* 938 * Wait until there's full entropy somewhere among the 939 * CPUs, as confirmed at most once per minute, or 940 * someone wants to consolidate. 941 */ 942 if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) { 943 consolidate = true; 944 } else { 945 mutex_enter(&E->lock); 946 if (!E->consolidate) 947 cv_timedwait(&E->cv, &E->lock, 60*hz); 948 consolidate = E->consolidate; 949 E->consolidate = false; 950 mutex_exit(&E->lock); 951 } 952 953 if (consolidate) { 954 /* Do it. */ 955 entropy_do_consolidate(); 956 957 /* Mitigate abuse. */ 958 kpause("entropy", false, hz, NULL); 959 } 960 } 961 } 962 963 /* 964 * entropy_pending() 965 * 966 * Count up the amount of entropy pending on other CPUs. 967 */ 968 static uint32_t 969 entropy_pending(void) 970 { 971 uint32_t pending = 0; 972 973 percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending); 974 return pending; 975 } 976 977 static void 978 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci) 979 { 980 struct entropy_cpu *ec = ptr; 981 uint32_t *pendingp = cookie; 982 uint32_t cpu_pending; 983 984 cpu_pending = atomic_load_relaxed(&ec->ec_pending); 985 *pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending); 986 } 987 988 /* 989 * entropy_do_consolidate() 990 * 991 * Issue a cross-call to gather entropy on all CPUs and advance 992 * the entropy epoch. 
993 */ 994 static void 995 entropy_do_consolidate(void) 996 { 997 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 998 static struct timeval lasttime; /* serialized by E->lock */ 999 struct entpool pool; 1000 uint8_t buf[ENTPOOL_CAPACITY]; 1001 unsigned diff; 1002 uint64_t ticket; 1003 1004 /* Gather entropy on all CPUs into a temporary pool. */ 1005 memset(&pool, 0, sizeof pool); 1006 ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL); 1007 xc_wait(ticket); 1008 1009 /* Acquire the lock to notify waiters. */ 1010 mutex_enter(&E->lock); 1011 1012 /* Count another consolidation. */ 1013 entropy_consolidate_evcnt.ev_count++; 1014 1015 /* Note when we last consolidated, i.e. now. */ 1016 E->timestamp = time_uptime; 1017 1018 /* Mix what we gathered into the global pool. */ 1019 entpool_extract(&pool, buf, sizeof buf); 1020 entpool_enter(&E->pool, buf, sizeof buf); 1021 explicit_memset(&pool, 0, sizeof pool); 1022 1023 /* Count the entropy that was gathered. */ 1024 diff = MIN(E->needed, E->pending); 1025 atomic_store_relaxed(&E->needed, E->needed - diff); 1026 E->pending -= diff; 1027 if (__predict_false(E->needed > 0)) { 1028 if (ratecheck(&lasttime, &interval)) 1029 printf("entropy: WARNING:" 1030 " consolidating less than full entropy\n"); 1031 } 1032 1033 /* Advance the epoch and notify waiters. */ 1034 entropy_notify(); 1035 1036 /* Release the lock. */ 1037 mutex_exit(&E->lock); 1038 } 1039 1040 /* 1041 * entropy_consolidate_xc(vpool, arg2) 1042 * 1043 * Extract output from the local CPU's input pool and enter it 1044 * into a temporary pool passed as vpool. 1045 */ 1046 static void 1047 entropy_consolidate_xc(void *vpool, void *arg2 __unused) 1048 { 1049 struct entpool *pool = vpool; 1050 struct entropy_cpu *ec; 1051 uint8_t buf[ENTPOOL_CAPACITY]; 1052 uint32_t extra[7]; 1053 unsigned i = 0; 1054 int s; 1055 1056 /* Grab CPU number and cycle counter to mix extra into the pool. */ 1057 extra[i++] = cpu_number(); 1058 extra[i++] = entropy_timer(); 1059 1060 /* 1061 * Acquire the per-CPU state, blocking soft interrupts and 1062 * discarding entropy in hard interrupts, so that we can 1063 * extract from the per-CPU pool. 1064 */ 1065 ec = percpu_getref(entropy_percpu); 1066 s = splsoftserial(); 1067 KASSERT(!ec->ec_locked); 1068 ec->ec_locked = true; 1069 __insn_barrier(); 1070 extra[i++] = entropy_timer(); 1071 1072 /* Extract the data and count it no longer pending. */ 1073 entpool_extract(ec->ec_pool, buf, sizeof buf); 1074 atomic_store_relaxed(&ec->ec_pending, 0); 1075 extra[i++] = entropy_timer(); 1076 1077 /* Release the per-CPU state. */ 1078 KASSERT(ec->ec_locked); 1079 __insn_barrier(); 1080 ec->ec_locked = false; 1081 splx(s); 1082 percpu_putref(entropy_percpu); 1083 extra[i++] = entropy_timer(); 1084 1085 /* 1086 * Copy over statistics, and enter the per-CPU extract and the 1087 * extra timing into the temporary pool, under the global lock. 1088 */ 1089 mutex_enter(&E->lock); 1090 extra[i++] = entropy_timer(); 1091 entpool_enter(pool, buf, sizeof buf); 1092 explicit_memset(buf, 0, sizeof buf); 1093 extra[i++] = entropy_timer(); 1094 KASSERT(i == __arraycount(extra)); 1095 entpool_enter(pool, extra, sizeof extra); 1096 explicit_memset(extra, 0, sizeof extra); 1097 mutex_exit(&E->lock); 1098 } 1099 1100 /* 1101 * entropy_notify() 1102 * 1103 * Caller just contributed entropy to the global pool. Advance 1104 * the entropy epoch and notify waiters. 1105 * 1106 * Caller must hold the global entropy lock. 
Except for the
 *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
 *	have just transitioned from partial entropy to full
 *	entropy -- E->needed should be zero now.
 */
static void
entropy_notify(void)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	unsigned epoch;

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	/*
	 * If this is the first time, print a message to the console
	 * that we're ready so operators can compare it to the timing
	 * of other events.
	 */
	if (__predict_false(!rnd_initial_entropy) && E->needed == 0) {
		printf("entropy: ready\n");
		rnd_initial_entropy = 1;
	}

	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
	    ratecheck(&lasttime, &interval)) {
		epoch = E->epoch + 1;
		if (epoch == 0 || epoch == (unsigned)-1)
			epoch = 1;
		atomic_store_relaxed(&E->epoch, epoch);
	}

	/* Notify waiters.  */
	if (E->stage >= ENTROPY_WARM) {
		cv_broadcast(&E->cv);
		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
	}

	/* Count another notification.  */
	entropy_notify_evcnt.ev_count++;
}

/*
 * entropy_consolidate()
 *
 *	Trigger entropy consolidation and wait for it to complete.
 *
 *	This should be used sparingly, not periodically -- requiring
 *	conscious intervention by the operator or a clear policy
 *	decision.  Otherwise, the kernel will automatically consolidate
 *	when enough entropy has been gathered into per-CPU pools to
 *	transition to full entropy.
 */
void
entropy_consolidate(void)
{
	uint64_t ticket;
	int error;

	KASSERT(E->stage == ENTROPY_HOT);

	mutex_enter(&E->lock);
	ticket = entropy_consolidate_evcnt.ev_count;
	E->consolidate = true;
	cv_broadcast(&E->cv);
	while (ticket == entropy_consolidate_evcnt.ev_count) {
		error = cv_wait_sig(&E->cv, &E->lock);
		if (error)
			break;
	}
	mutex_exit(&E->lock);
}

/*
 * sysctl -w kern.entropy.consolidate=1
 *
 *	Trigger entropy consolidation and wait for it to complete.
 *	Writable only by superuser.  This, writing to /dev/random, and
 *	ioctl(RNDADDDATA) are the only ways for the system to
 *	consolidate entropy if the operator knows something the kernel
 *	doesn't about how unpredictable the pending entropy pools are.
 */
static int
sysctl_entropy_consolidate(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int arg;
	int error;

	KASSERT(E->stage == ENTROPY_HOT);

	node.sysctl_data = &arg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (arg)
		entropy_consolidate();

	return error;
}

/*
 * sysctl -w kern.entropy.gather=1
 *
 *	Trigger gathering entropy from all on-demand sources, and wait
 *	for synchronous sources (but not asynchronous sources) to
 *	complete.  Writable only by superuser.
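 *
 *	Illustrative operator usage (an example sequence only):
 *
 *		# sysctl -w kern.entropy.gather=1
 *		# sysctl kern.entropy.needed
 *
 *	The second command reads the kern.entropy.needed node created
 *	in entropy_init to check whether the deficit has been covered.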
1214 */ 1215 static int 1216 sysctl_entropy_gather(SYSCTLFN_ARGS) 1217 { 1218 struct sysctlnode node = *rnode; 1219 int arg; 1220 int error; 1221 1222 KASSERT(E->stage == ENTROPY_HOT); 1223 1224 node.sysctl_data = &arg; 1225 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1226 if (error || newp == NULL) 1227 return error; 1228 if (arg) { 1229 mutex_enter(&E->lock); 1230 entropy_request(ENTROPY_CAPACITY); 1231 mutex_exit(&E->lock); 1232 } 1233 1234 return 0; 1235 } 1236 1237 /* 1238 * entropy_extract(buf, len, flags) 1239 * 1240 * Extract len bytes from the global entropy pool into buf. 1241 * 1242 * Flags may have: 1243 * 1244 * ENTROPY_WAIT Wait for entropy if not available yet. 1245 * ENTROPY_SIG Allow interruption by a signal during wait. 1246 * ENTROPY_HARDFAIL Either fill the buffer with full entropy, 1247 * or fail without filling it at all. 1248 * 1249 * Return zero on success, or error on failure: 1250 * 1251 * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 1252 * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 1253 * 1254 * If ENTROPY_WAIT is set, allowed only in thread context. If 1255 * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's 1256 * awfully high... Do we really need it in hard interrupts? This 1257 * arises from use of cprng_strong(9).) 1258 */ 1259 int 1260 entropy_extract(void *buf, size_t len, int flags) 1261 { 1262 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1263 static struct timeval lasttime; /* serialized by E->lock */ 1264 int error; 1265 1266 if (ISSET(flags, ENTROPY_WAIT)) { 1267 ASSERT_SLEEPABLE(); 1268 KASSERTMSG(E->stage >= ENTROPY_WARM, 1269 "can't wait for entropy until warm"); 1270 } 1271 1272 /* Acquire the global lock to get at the global pool. */ 1273 if (E->stage >= ENTROPY_WARM) 1274 mutex_enter(&E->lock); 1275 1276 /* Count up request for entropy in interrupt context. */ 1277 if (cpu_intr_p()) 1278 entropy_extract_intr_evcnt.ev_count++; 1279 1280 /* Wait until there is enough entropy in the system. */ 1281 error = 0; 1282 while (E->needed) { 1283 /* Ask for more, synchronously if possible. */ 1284 entropy_request(len); 1285 1286 /* If we got enough, we're done. */ 1287 if (E->needed == 0) { 1288 KASSERT(error == 0); 1289 break; 1290 } 1291 1292 /* If not waiting, stop here. */ 1293 if (!ISSET(flags, ENTROPY_WAIT)) { 1294 error = EWOULDBLOCK; 1295 break; 1296 } 1297 1298 /* Wait for some entropy to come in and try again. */ 1299 KASSERT(E->stage >= ENTROPY_WARM); 1300 if (ISSET(flags, ENTROPY_SIG)) { 1301 error = cv_wait_sig(&E->cv, &E->lock); 1302 if (error) 1303 break; 1304 } else { 1305 cv_wait(&E->cv, &E->lock); 1306 } 1307 } 1308 1309 /* 1310 * Count failure -- but fill the buffer nevertheless, unless 1311 * the caller specified ENTROPY_HARDFAIL. 1312 */ 1313 if (error) { 1314 if (ISSET(flags, ENTROPY_HARDFAIL)) 1315 goto out; 1316 entropy_extract_fail_evcnt.ev_count++; 1317 } 1318 1319 /* 1320 * Report a warning if we have never yet reached full entropy. 1321 * This is the only case where we consider entropy to be 1322 * `depleted' without kern.entropy.depletion enabled -- when we 1323 * only have partial entropy, an adversary may be able to 1324 * narrow the state of the pool down to a small number of 1325 * possibilities; the output then enables them to confirm a 1326 * guess, reducing its entropy from the adversary's perspective 1327 * to zero. 
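 *
 *	(Illustrative aside: callers in thread context can avoid this
 *	case entirely by passing ENTROPY_WAIT, e.g.
 *
 *		uint8_t seed[32];
 *		int error = entropy_extract(seed, sizeof seed,
 *		    ENTROPY_WAIT|ENTROPY_SIG);
 *
 *	which sleeps until full entropy is reached, or fails with
 *	EINTR/ERESTART if interrupted, per the flag descriptions
 *	above.)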
1328 */ 1329 if (__predict_false(E->epoch == (unsigned)-1)) { 1330 if (ratecheck(&lasttime, &interval)) 1331 printf("entropy: WARNING:" 1332 " extracting entropy too early\n"); 1333 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY); 1334 } 1335 1336 /* Extract data from the pool, and `deplete' if we're doing that. */ 1337 entpool_extract(&E->pool, buf, len); 1338 if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 1339 error == 0) { 1340 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 1341 1342 atomic_store_relaxed(&E->needed, 1343 E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost)); 1344 entropy_deplete_evcnt.ev_count++; 1345 } 1346 1347 out: /* Release the global lock and return the error. */ 1348 if (E->stage >= ENTROPY_WARM) 1349 mutex_exit(&E->lock); 1350 return error; 1351 } 1352 1353 /* 1354 * entropy_poll(events) 1355 * 1356 * Return the subset of events ready, and if it is not all of 1357 * events, record curlwp as waiting for entropy. 1358 */ 1359 int 1360 entropy_poll(int events) 1361 { 1362 int revents = 0; 1363 1364 KASSERT(E->stage >= ENTROPY_WARM); 1365 1366 /* Always ready for writing. */ 1367 revents |= events & (POLLOUT|POLLWRNORM); 1368 1369 /* Narrow it down to reads. */ 1370 events &= POLLIN|POLLRDNORM; 1371 if (events == 0) 1372 return revents; 1373 1374 /* 1375 * If we have reached full entropy and we're not depleting 1376 * entropy, we are forever ready. 1377 */ 1378 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 1379 __predict_true(!atomic_load_relaxed(&entropy_depletion))) 1380 return revents | events; 1381 1382 /* 1383 * Otherwise, check whether we need entropy under the lock. If 1384 * we don't, we're ready; if we do, add ourselves to the queue. 1385 */ 1386 mutex_enter(&E->lock); 1387 if (E->needed == 0) 1388 revents |= events; 1389 else 1390 selrecord(curlwp, &E->selq); 1391 mutex_exit(&E->lock); 1392 1393 return revents; 1394 } 1395 1396 /* 1397 * filt_entropy_read_detach(kn) 1398 * 1399 * struct filterops::f_detach callback for entropy read events: 1400 * remove kn from the list of waiters. 1401 */ 1402 static void 1403 filt_entropy_read_detach(struct knote *kn) 1404 { 1405 1406 KASSERT(E->stage >= ENTROPY_WARM); 1407 1408 mutex_enter(&E->lock); 1409 SLIST_REMOVE(&E->selq.sel_klist, kn, knote, kn_selnext); 1410 mutex_exit(&E->lock); 1411 } 1412 1413 /* 1414 * filt_entropy_read_event(kn, hint) 1415 * 1416 * struct filterops::f_event callback for entropy read events: 1417 * poll for entropy. Caller must hold the global entropy lock if 1418 * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 1419 */ 1420 static int 1421 filt_entropy_read_event(struct knote *kn, long hint) 1422 { 1423 int ret; 1424 1425 KASSERT(E->stage >= ENTROPY_WARM); 1426 1427 /* Acquire the lock, if caller is outside entropy subsystem. */ 1428 if (hint == NOTE_SUBMIT) 1429 KASSERT(mutex_owned(&E->lock)); 1430 else 1431 mutex_enter(&E->lock); 1432 1433 /* 1434 * If we still need entropy, can't read anything; if not, can 1435 * read arbitrarily much. 1436 */ 1437 if (E->needed != 0) { 1438 ret = 0; 1439 } else { 1440 if (atomic_load_relaxed(&entropy_depletion)) 1441 kn->kn_data = ENTROPY_CAPACITY*NBBY; 1442 else 1443 kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 1444 ret = 1; 1445 } 1446 1447 /* Release the lock, if caller is outside entropy subsystem. 
*/ 1448 if (hint == NOTE_SUBMIT) 1449 KASSERT(mutex_owned(&E->lock)); 1450 else 1451 mutex_exit(&E->lock); 1452 1453 return ret; 1454 } 1455 1456 static const struct filterops entropy_read_filtops = { 1457 .f_isfd = 1, /* XXX Makes sense only for /dev/u?random. */ 1458 .f_attach = NULL, 1459 .f_detach = filt_entropy_read_detach, 1460 .f_event = filt_entropy_read_event, 1461 }; 1462 1463 /* 1464 * entropy_kqfilter(kn) 1465 * 1466 * Register kn to receive entropy event notifications. May be 1467 * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. 1468 */ 1469 int 1470 entropy_kqfilter(struct knote *kn) 1471 { 1472 1473 KASSERT(E->stage >= ENTROPY_WARM); 1474 1475 switch (kn->kn_filter) { 1476 case EVFILT_READ: 1477 /* Enter into the global select queue. */ 1478 mutex_enter(&E->lock); 1479 kn->kn_fop = &entropy_read_filtops; 1480 SLIST_INSERT_HEAD(&E->selq.sel_klist, kn, kn_selnext); 1481 mutex_exit(&E->lock); 1482 return 0; 1483 case EVFILT_WRITE: 1484 /* Can always dump entropy into the system. */ 1485 kn->kn_fop = &seltrue_filtops; 1486 return 0; 1487 default: 1488 return EINVAL; 1489 } 1490 } 1491 1492 /* 1493 * rndsource_setcb(rs, get, getarg) 1494 * 1495 * Set the request callback for the entropy source rs, if it can 1496 * provide entropy on demand. Must precede rnd_attach_source. 1497 */ 1498 void 1499 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *), 1500 void *getarg) 1501 { 1502 1503 rs->get = get; 1504 rs->getarg = getarg; 1505 } 1506 1507 /* 1508 * rnd_attach_source(rs, name, type, flags) 1509 * 1510 * Attach the entropy source rs. Must be done after 1511 * rndsource_setcb, if any, and before any calls to rnd_add_data. 1512 */ 1513 void 1514 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type, 1515 uint32_t flags) 1516 { 1517 uint32_t extra[4]; 1518 unsigned i = 0; 1519 1520 /* Grab cycle counter to mix extra into the pool. */ 1521 extra[i++] = entropy_timer(); 1522 1523 /* 1524 * Apply some standard flags: 1525 * 1526 * - We do not bother with network devices by default, for 1527 * hysterical raisins (perhaps: because it is often the case 1528 * that an adversary can influence network packet timings). 1529 */ 1530 switch (type) { 1531 case RND_TYPE_NET: 1532 flags |= RND_FLAG_NO_COLLECT; 1533 break; 1534 } 1535 1536 /* Sanity-check the callback if RND_FLAG_HASCB is set. */ 1537 KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL); 1538 1539 /* Initialize the random source. */ 1540 memset(rs->name, 0, sizeof(rs->name)); /* paranoia */ 1541 strlcpy(rs->name, name, sizeof(rs->name)); 1542 rs->total = 0; 1543 rs->type = type; 1544 rs->flags = flags; 1545 if (E->stage >= ENTROPY_WARM) 1546 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 1547 extra[i++] = entropy_timer(); 1548 1549 /* Wire it into the global list of random sources. */ 1550 if (E->stage >= ENTROPY_WARM) 1551 mutex_enter(&E->lock); 1552 LIST_INSERT_HEAD(&E->sources, rs, list); 1553 if (E->stage >= ENTROPY_WARM) 1554 mutex_exit(&E->lock); 1555 extra[i++] = entropy_timer(); 1556 1557 /* Request that it provide entropy ASAP, if we can. */ 1558 if (ISSET(flags, RND_FLAG_HASCB)) 1559 (*rs->get)(ENTROPY_CAPACITY, rs->getarg); 1560 extra[i++] = entropy_timer(); 1561 1562 /* Mix the extra into the pool. */ 1563 KASSERT(i == __arraycount(extra)); 1564 entropy_enter(extra, sizeof extra, 0); 1565 explicit_memset(extra, 0, sizeof extra); 1566 } 1567 1568 /* 1569 * rnd_detach_source(rs) 1570 * 1571 * Detach the entropy source rs. May sleep waiting for users to 1572 * drain. 
Further use is not allowed. 1573 */ 1574 void 1575 rnd_detach_source(struct krndsource *rs) 1576 { 1577 1578 /* 1579 * If we're cold (shouldn't happen, but hey), just remove it 1580 * from the list -- there's nothing allocated. 1581 */ 1582 if (E->stage == ENTROPY_COLD) { 1583 LIST_REMOVE(rs, list); 1584 return; 1585 } 1586 1587 /* We may have to wait for entropy_request. */ 1588 ASSERT_SLEEPABLE(); 1589 1590 /* Wait until the source list is not in use, and remove it. */ 1591 mutex_enter(&E->lock); 1592 while (E->sourcelock) 1593 cv_wait(&E->cv, &E->lock); 1594 LIST_REMOVE(rs, list); 1595 mutex_exit(&E->lock); 1596 1597 /* Free the per-CPU data. */ 1598 percpu_free(rs->state, sizeof(struct rndsource_cpu)); 1599 } 1600 1601 /* 1602 * rnd_lock_sources() 1603 * 1604 * Prevent changes to the list of rndsources while we iterate it. 1605 * Interruptible. Caller must hold the global entropy lock. If 1606 * successful, no rndsource will go away until rnd_unlock_sources 1607 * even while the caller releases the global entropy lock. 1608 */ 1609 static int 1610 rnd_lock_sources(void) 1611 { 1612 int error; 1613 1614 KASSERT(mutex_owned(&E->lock)); 1615 1616 while (E->sourcelock) { 1617 error = cv_wait_sig(&E->cv, &E->lock); 1618 if (error) 1619 return error; 1620 } 1621 1622 E->sourcelock = curlwp; 1623 return 0; 1624 } 1625 1626 /* 1627 * rnd_trylock_sources() 1628 * 1629 * Try to lock the list of sources, but if it's already locked, 1630 * fail. Caller must hold the global entropy lock. If 1631 * successful, no rndsource will go away until rnd_unlock_sources 1632 * even while the caller releases the global entropy lock. 1633 */ 1634 static bool 1635 rnd_trylock_sources(void) 1636 { 1637 1638 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1639 1640 if (E->sourcelock) 1641 return false; 1642 E->sourcelock = curlwp; 1643 return true; 1644 } 1645 1646 /* 1647 * rnd_unlock_sources() 1648 * 1649 * Unlock the list of sources after rnd_lock_sources or 1650 * rnd_trylock_sources. Caller must hold the global entropy lock. 1651 */ 1652 static void 1653 rnd_unlock_sources(void) 1654 { 1655 1656 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1657 1658 KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p", 1659 curlwp, E->sourcelock); 1660 E->sourcelock = NULL; 1661 if (E->stage >= ENTROPY_WARM) 1662 cv_broadcast(&E->cv); 1663 } 1664 1665 /* 1666 * rnd_sources_locked() 1667 * 1668 * True if we hold the list of rndsources locked, for diagnostic 1669 * assertions. 1670 */ 1671 static bool __diagused 1672 rnd_sources_locked(void) 1673 { 1674 1675 return E->sourcelock == curlwp; 1676 } 1677 1678 /* 1679 * entropy_request(nbytes) 1680 * 1681 * Request nbytes bytes of entropy from all sources in the system. 1682 * OK if we overdo it. Caller must hold the global entropy lock; 1683 * will release and re-acquire it. 1684 */ 1685 static void 1686 entropy_request(size_t nbytes) 1687 { 1688 struct krndsource *rs; 1689 1690 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1691 1692 /* 1693 * If there is a request in progress, let it proceed. 1694 * Otherwise, note that a request is in progress to avoid 1695 * reentry and to block rnd_detach_source until we're done. 1696 */ 1697 if (!rnd_trylock_sources()) 1698 return; 1699 entropy_request_evcnt.ev_count++; 1700 1701 /* Clamp to the maximum reasonable request. */ 1702 nbytes = MIN(nbytes, ENTROPY_CAPACITY); 1703 1704 /* Walk the list of sources. 
*/ 1705 LIST_FOREACH(rs, &E->sources, list) { 1706 /* Skip sources without callbacks. */ 1707 if (!ISSET(rs->flags, RND_FLAG_HASCB)) 1708 continue; 1709 1710 /* 1711 * Skip sources that are disabled altogether -- we 1712 * would just ignore their samples anyway. 1713 */ 1714 if (ISSET(rs->flags, RND_FLAG_NO_COLLECT)) 1715 continue; 1716 1717 /* Drop the lock while we call the callback. */ 1718 if (E->stage >= ENTROPY_WARM) 1719 mutex_exit(&E->lock); 1720 (*rs->get)(nbytes, rs->getarg); 1721 if (E->stage >= ENTROPY_WARM) 1722 mutex_enter(&E->lock); 1723 } 1724 1725 /* Notify rnd_detach_source that the request is done. */ 1726 rnd_unlock_sources(); 1727 } 1728 1729 /* 1730 * rnd_add_uint32(rs, value) 1731 * 1732 * Enter 32 bits of data from an entropy source into the pool. 1733 * 1734 * If rs is NULL, may not be called from interrupt context. 1735 * 1736 * If rs is non-NULL, may be called from any context. May drop 1737 * data if called from interrupt context. 1738 */ 1739 void 1740 rnd_add_uint32(struct krndsource *rs, uint32_t value) 1741 { 1742 1743 rnd_add_data(rs, &value, sizeof value, 0); 1744 } 1745 1746 void 1747 _rnd_add_uint32(struct krndsource *rs, uint32_t value) 1748 { 1749 1750 rnd_add_data(rs, &value, sizeof value, 0); 1751 } 1752 1753 void 1754 _rnd_add_uint64(struct krndsource *rs, uint64_t value) 1755 { 1756 1757 rnd_add_data(rs, &value, sizeof value, 0); 1758 } 1759 1760 /* 1761 * rnd_add_data(rs, buf, len, entropybits) 1762 * 1763 * Enter data from an entropy source into the pool, with a 1764 * driver's estimate of how much entropy the physical source of 1765 * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 1766 * estimate and treat it as zero. 1767 * 1768 * If rs is NULL, may not be called from interrupt context. 1769 * 1770 * If rs is non-NULL, may be called from any context. May drop 1771 * data if called from interrupt context. 1772 */ 1773 void 1774 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len, 1775 uint32_t entropybits) 1776 { 1777 uint32_t extra; 1778 uint32_t flags; 1779 1780 KASSERTMSG(howmany(entropybits, NBBY) <= len, 1781 "%s: impossible entropy rate:" 1782 " %"PRIu32" bits in %"PRIu32"-byte string", 1783 rs ? rs->name : "(anonymous)", entropybits, len); 1784 1785 /* If there's no rndsource, just enter the data and time now. */ 1786 if (rs == NULL) { 1787 entropy_enter(buf, len, entropybits); 1788 extra = entropy_timer(); 1789 entropy_enter(&extra, sizeof extra, 0); 1790 explicit_memset(&extra, 0, sizeof extra); 1791 return; 1792 } 1793 1794 /* Load a snapshot of the flags. Ioctl may change them under us. */ 1795 flags = atomic_load_relaxed(&rs->flags); 1796 1797 /* 1798 * Skip if: 1799 * - we're not collecting entropy, or 1800 * - the operator doesn't want to collect entropy from this, or 1801 * - neither data nor timings are being collected from this. 1802 */ 1803 if (!atomic_load_relaxed(&entropy_collection) || 1804 ISSET(flags, RND_FLAG_NO_COLLECT) || 1805 !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME)) 1806 return; 1807 1808 /* If asked, ignore the estimate. */ 1809 if (ISSET(flags, RND_FLAG_NO_ESTIMATE)) 1810 entropybits = 0; 1811 1812 /* If we are collecting data, enter them. */ 1813 if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) 1814 rnd_add_data_1(rs, buf, len, entropybits); 1815 1816 /* If we are collecting timings, enter one. 
	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
		extra = entropy_timer();
		rnd_add_data_1(rs, &extra, sizeof extra, 0);
	}
}

/*
 * rnd_add_data_1(rs, buf, len, entropybits)
 *
 *	Internal subroutine to call either entropy_enter_intr, if we're
 *	in interrupt context, or entropy_enter if not, and to count the
 *	entropy in an rndsource.
 */
static void
rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{
	bool fullyused;

	/*
	 * If we're in interrupt context, use entropy_enter_intr and
	 * take note of whether it consumed the full sample; if not,
	 * use entropy_enter, which always consumes the full sample.
	 */
	if (curlwp && cpu_intr_p()) {
		fullyused = entropy_enter_intr(buf, len, entropybits);
	} else {
		entropy_enter(buf, len, entropybits);
		fullyused = true;
	}

	/*
	 * If we used the full sample, note how many bits were
	 * contributed from this source.
	 */
	if (fullyused) {
		if (E->stage < ENTROPY_HOT) {
			if (E->stage >= ENTROPY_WARM)
				mutex_enter(&E->lock);
			rs->total += MIN(UINT_MAX - rs->total, entropybits);
			if (E->stage >= ENTROPY_WARM)
				mutex_exit(&E->lock);
		} else {
			struct rndsource_cpu *rc = percpu_getref(rs->state);
			unsigned nbits = rc->rc_nbits;

			nbits += MIN(UINT_MAX - nbits, entropybits);
			atomic_store_relaxed(&rc->rc_nbits, nbits);
			percpu_putref(rs->state);
		}
	}
}

/*
 * rnd_add_data_sync(rs, buf, len, entropybits)
 *
 *	Same as rnd_add_data.  Originally used in rndsource callbacks,
 *	to break an unnecessary cycle; no longer really needed.
 */
void
rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{

	rnd_add_data(rs, buf, len, entropybits);
}

/*
 * rndsource_entropybits(rs)
 *
 *	Return approximately the number of bits of entropy that have
 *	been contributed via rs so far.  Approximate if other CPUs may
 *	be calling rnd_add_data concurrently.
 */
static unsigned
rndsource_entropybits(struct krndsource *rs)
{
	unsigned nbits = rs->total;

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());
	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
	return nbits;
}

static void
rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	unsigned *nbitsp = cookie;
	unsigned cpu_nbits;

	cpu_nbits = atomic_load_relaxed(&rc->rc_nbits);
	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
}

/*
 * rndsource_to_user(rs, urs)
 *
 *	Copy a description of rs out to urs for userland.
 */
static void
rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
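	/*
	 * That is: zero the whole record rather than assigning it
	 * field by field, so padding and any fields we don't fill in
	 * below can't leak kernel memory to userland.
	 */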
	memset(urs, 0, sizeof(*urs));

	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
	strlcpy(urs->name, rs->name, sizeof(urs->name));
	urs->total = rndsource_entropybits(rs);
	urs->type = rs->type;
	urs->flags = atomic_load_relaxed(&rs->flags);
}

/*
 * rndsource_to_user_est(rs, urse)
 *
 *	Copy a description of rs and estimation statistics out to urse
 *	for userland.
 */
static void
rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urse, 0, sizeof(*urse));

	/* Copy out the rndsource description.  */
	rndsource_to_user(rs, &urse->rt);

	/* Zero out the statistics because we don't do estimation.  */
	urse->dt_samples = 0;
	urse->dt_total = 0;
	urse->dv_samples = 0;
	urse->dv_total = 0;
}

/*
 * entropy_reset_xc(arg1, arg2)
 *
 *	Reset the current CPU's pending entropy to zero.
 */
static void
entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
{
	uint32_t extra = entropy_timer();
	struct entropy_cpu *ec;
	int s;

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Zero the pending count and enter a cycle count for fun.  */
	ec->ec_pending = 0;
	entpool_enter(ec->ec_pool, &extra, sizeof extra);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}

/*
 * entropy_ioctl(cmd, data)
 *
 *	Handle various /dev/random ioctl queries.
 */
int
entropy_ioctl(unsigned long cmd, void *data)
{
	struct krndsource *rs;
	bool privileged = false;	/* only set for RNDADDDATA below */
	int error;

	KASSERT(E->stage >= ENTROPY_WARM);

	/* Verify user's authorization to perform the ioctl.  */
	switch (cmd) {
	case RNDGETENTCNT:
	case RNDGETPOOLSTAT:
	case RNDGETSRCNUM:
	case RNDGETSRCNAME:
	case RNDGETESTNUM:
	case RNDGETESTNAME:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDCTL:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDADDDATA:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
		/* Ascertain whether the user's inputs should be counted.  */
		if (kauth_authorize_device(curlwp->l_cred,
			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
			NULL, NULL, NULL, NULL) == 0)
			privileged = true;
		break;
	default: {
		/*
		 * XXX Hack to avoid changing module ABI so this can be
		 * pulled up.  Later, we can just remove the argument.
		 */
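		/*
		 * Hand the compat hook a throwaway struct file whose
		 * only usable member is f_ops, so that if the compat
		 * code re-dispatches an ioctl through
		 * fp->f_ops->fo_ioctl it lands back in
		 * rnd_system_ioctl below rather than touching a real
		 * open file.
		 */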
		static const struct fileops fops = {
			.fo_ioctl = rnd_system_ioctl,
		};
		struct file f = {
			.f_ops = &fops,
		};
		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
		    enosys(), error);
#if defined(_LP64)
		if (error == ENOSYS)
			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
			    enosys(), error);
#endif
		if (error == ENOSYS)
			error = ENOTTY;
		break;
	}
	}

	/* If anything went wrong with authorization, stop here.  */
	if (error)
		return error;

	/* Dispatch on the command.  */
	switch (cmd) {
	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
		uint32_t *countp = data;

		mutex_enter(&E->lock);
		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
		mutex_exit(&E->lock);

		break;
	}
	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
		rndpoolstat_t *pstat = data;

		mutex_enter(&E->lock);

		/* parameters */
		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
		pstat->threshold = ENTROPY_CAPACITY*1;	/* bytes */
		pstat->maxentropy = ENTROPY_CAPACITY*NBBY;	/* bits */

		/* state */
		pstat->added = 0;	/* XXX total entropy_enter count */
		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
		pstat->removed = 0;	/* XXX total entropy_extract count */
		pstat->discarded = 0;	/* XXX bits of entropy beyond capacity */
		pstat->generated = 0;	/* XXX bits of data...fabricated? */

		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
		rndstat_t *stat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (stat->count == 0)
			break;
		if (stat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == stat->start)
				break;
		}
		while (i < stat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &stat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= stat->count);
		stat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
		rndstat_est_t *estat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (estat->count == 0)
			break;
		if (estat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
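		/*
		 * As in RNDGETSRCNUM above: holding the source list
		 * lock (not just E->lock) is what keeps rs from being
		 * detached while we drop E->lock around each copy
		 * below.
		 */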
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == estat->start)
				break;
		}
		while (i < estat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &estat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= estat->count);
		estat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
		rndstat_name_t *nstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, nstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &nstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
		rndstat_est_name_t *enstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, enstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &enstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDCTL: {	/* Modify entropy source flags.  */
		rndctl_t *rndctl = data;
		const size_t n = sizeof(rs->name);
		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
		uint32_t flags;
		bool reset = false, request = false;

		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));

		/* Whitelist the flags that the user can change.  */
		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;

		/*
		 * For each matching rndsource, either by type if
		 * specified or by name if not, set the masked flags.
		 */
		mutex_enter(&E->lock);
		LIST_FOREACH(rs, &E->sources, list) {
			if (rndctl->type != 0xff) {
				if (rs->type != rndctl->type)
					continue;
			} else {
				if (strncmp(rs->name, rndctl->name, n) != 0)
					continue;
			}
			flags = rs->flags & ~rndctl->mask;
			flags |= rndctl->flags & rndctl->mask;
			if ((rs->flags & resetflags) == 0 &&
			    (flags & resetflags) != 0)
				reset = true;
			if ((rs->flags ^ flags) & resetflags)
				request = true;
			atomic_store_relaxed(&rs->flags, flags);
		}
		mutex_exit(&E->lock);

		/*
		 * If we disabled estimation or collection, nix all the
		 * pending entropy and set needed to the maximum.
		 */
		if (reset) {
			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
			mutex_enter(&E->lock);
			E->pending = 0;
			atomic_store_relaxed(&E->needed,
			    ENTROPY_CAPACITY*NBBY);
			mutex_exit(&E->lock);
		}

		/*
		 * If we changed any of the estimation or collection
		 * flags, request new samples from everyone -- either
		 * to make up for what we just lost, or to get new
		 * samples from what we just added.
		 */
		if (request) {
			mutex_enter(&E->lock);
			entropy_request(ENTROPY_CAPACITY);
			mutex_exit(&E->lock);
		}
		break;
	}
	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
		rnddata_t *rdata = data;
		unsigned entropybits = 0;

		if (!atomic_load_relaxed(&entropy_collection))
			break;	/* thanks but no thanks */
		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
			return EINVAL;

		/*
		 * This ioctl serves as the userland alternative to a
		 * bootloader-provided seed -- typically furnished by
		 * /etc/rc.d/random_seed.  We accept the user's entropy
		 * claim only if
		 *
		 *	(a) the user is privileged, and
		 *	(b) we have not entered a bootloader seed,
		 *
		 * under the assumption that the user may use this to
		 * load a seed from disk that we have already loaded
		 * from the bootloader, so we don't double-count it.
		 */
		if (privileged && rdata->entropy && rdata->len) {
			mutex_enter(&E->lock);
			if (!E->seeded) {
				entropybits = MIN(rdata->entropy,
				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
				E->seeded = true;
			}
			mutex_exit(&E->lock);
		}

		/* Enter the data and consolidate entropy.  */
		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
		    entropybits);
		entropy_consolidate();
		break;
	}
	default:
		error = ENOTTY;
	}

	/* Return any error that may have come up.  */
	return error;
}

/* Legacy entry points */

void
rnd_seed(void *seed, size_t len)
{

	if (len != sizeof(rndsave_t)) {
		printf("entropy: invalid seed length: %zu,"
		    " expected sizeof(rndsave_t) = %zu\n",
		    len, sizeof(rndsave_t));
		return;
	}
	entropy_seed(seed);
}

void
rnd_init(void)
{

	entropy_init();
}

void
rnd_init_softint(void)
{

	entropy_init_late();
}

int
rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
{

	return entropy_ioctl(cmd, data);
}