/*	$NetBSD: kern_entropy.c,v 1.24 2020/09/29 07:51:01 gson Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* No entropy estimation based on the sample values, which is a
 *	  contradiction in terms and a potential source of side
 *	  channels.  It is the responsibility of the driver author to
 *	  study how predictable the physical source of input can ever
 *	  be, and to furnish a lower bound on the amount of entropy it
 *	  has.
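 *
 *	  For example (a sketch only -- the device, softc member, and
 *	  the one-bit figure are hypothetical, not taken from this
 *	  file), a driver that has studied its source and can justify
 *	  roughly one bit of entropy per sample would furnish that
 *	  bound as the last argument:
 *
 *		rnd_attach_source(&sc->sc_rndsource, "mydev",
 *		    RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE);
 *		...
 *		rnd_add_data(&sc->sc_rndsource, &sample, sizeof sample, 1);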
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.24 2020/09/29 07:51:01 gson Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct evcnt		*ec_softint_evcnt;
	struct entpool		*ec_pool;
	unsigned		ec_pending;
	bool			ec_locked;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned		rc_nbits; /* bits of entropy added */
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	needed;		/* (A) needed globally */
	unsigned	pending;	/* (A) pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	LIST_HEAD(,krndsource)	sources; /* list of entropy sources */
	enum entropy_stage {
		ENTROPY_COLD = 0, /* single-threaded */
		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
	}		stage;
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.  */
	.needed = ENTROPY_CAPACITY*NBBY,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
	.stage = ENTROPY_COLD,
};

#define	E	(&entropy_global)	/* declutter */

/* Read-mostly globals */
static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
static void		*entropy_sih __read_mostly; /* softint handler */
static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */

int rnd_initial_entropy __read_mostly;	/* XXX legacy */

static struct krndsource seed_rndsource __read_mostly;

/*
 * Event counters
 *
 *	Must be careful with adding these because they can serve as
 *	side channels.
 */
static struct evcnt entropy_discretionary_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
static struct evcnt entropy_immediate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
static struct evcnt entropy_partial_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
static struct evcnt entropy_consolidate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
static struct evcnt entropy_extract_intr_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr");
EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt);
static struct evcnt entropy_extract_fail_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
static struct evcnt entropy_request_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
EVCNT_ATTACH_STATIC(entropy_request_evcnt);
static struct evcnt entropy_deplete_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
static struct evcnt entropy_notify_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
EVCNT_ATTACH_STATIC(entropy_notify_evcnt);

/* Sysctl knobs */
static bool	entropy_collection = 1;
static bool	entropy_depletion = 0; /* Silly!  */

static const struct sysctlnode	*entropy_sysctlroot;
static struct sysctllog		*entropy_sysctllog;

/* Forward declarations */
static void	entropy_init_cpu(void *, void *, struct cpu_info *);
static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
static void	entropy_account_cpu(struct entropy_cpu *);
static void	entropy_enter(const void *, size_t, unsigned);
static bool	entropy_enter_intr(const void *, size_t, unsigned);
static void	entropy_softintr(void *);
static void	entropy_thread(void *);
static uint32_t	entropy_pending(void);
static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
static void	entropy_do_consolidate(void);
static void	entropy_consolidate_xc(void *, void *);
static void	entropy_notify(void);
static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
static void	filt_entropy_read_detach(struct knote *);
static int	filt_entropy_read_event(struct knote *, long);
static void	entropy_request(size_t);
static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
		    uint32_t);
static unsigned	rndsource_entropybits(struct krndsource *);
static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
static void	rndsource_to_user(struct krndsource *, rndsource_t *);
static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);

/*
 * entropy_timer()
 *
 *	Cycle counter, time counter, or anything that changes a wee bit
 *	unpredictably.
 */
static inline uint32_t
entropy_timer(void)
{
	struct bintime bt;
	uint32_t v;

	/* If we have a CPU cycle counter, use the low 32 bits.  */
#ifdef __HAVE_CPU_COUNTER
	if (__predict_true(cpu_hascounter()))
		return cpu_counter32();
#endif	/* __HAVE_CPU_COUNTER */

	/* If we're cold, tough.  Can't binuptime while cold.  */
	if (__predict_false(cold))
		return 0;

	/* Fold the 128 bits of binuptime into 32 bits.  */
	binuptime(&bt);
	v = bt.frac;
	v ^= bt.frac >> 32;
	v ^= bt.sec;
	v ^= bt.sec >> 32;
	return v;
}

static void
attach_seed_rndsource(void)
{

	/*
	 * First called no later than entropy_init, while we are still
	 * single-threaded, so no need for RUN_ONCE.
	 */
	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
		return;
	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
	E->seed_rndsource = true;
}

/*
 * entropy_init()
 *
 *	Initialize the entropy subsystem.  Panic on failure.
 *
 *	Requires percpu(9) and sysctl(9) to be initialized.
 */
static void
entropy_init(void)
{
	uint32_t extra[2];
	struct krndsource *rs;
	unsigned i = 0;

	KASSERT(E->stage == ENTROPY_COLD);

	/* Grab some cycle counts early at boot.  */
	extra[i++] = entropy_timer();

	/* Run the entropy pool cryptography self-test.  */
	if (entpool_selftest() == -1)
		panic("entropy pool crypto self-test failed");

	/* Create the sysctl directory.  */
	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
	    SYSCTL_DESCR("Entropy (random number sources) options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	/* Create the sysctl knobs.  */
	/* XXX These shouldn't be writable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
	    SYSCTL_DESCR("Trigger entropy consolidation now"),
	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	/* XXX These should maybe not be readable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
	    NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
	    NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "epoch", SYSCTL_DESCR("Entropy epoch"),
	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);

	/* Initialize the global state for multithreaded operation.  */
	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&E->cv, "entropy");
	selinit(&E->selq);

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Note if the bootloader didn't provide a seed.  */
	if (!E->seeded)
		printf("entropy: no seed from bootloader\n");

	/* Allocate the per-CPU records for all early entropy sources.  */
	LIST_FOREACH(rs, &E->sources, list)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));

	/* Enter the boot cycle count to get started.  */
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, 0);
	explicit_memset(extra, 0, sizeof extra);

	/* We are now ready for multi-threaded operation.  */
	E->stage = ENTROPY_WARM;
}

/*
 * entropy_init_late()
 *
 *	Late initialization.  Panic on failure.
 *
 *	Requires CPUs to have been detected and LWPs to have started.
 */
static void
entropy_init_late(void)
{
	int error;

	KASSERT(E->stage == ENTROPY_WARM);

	/* Allocate and initialize the per-CPU state.  */
	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
	    entropy_init_cpu, entropy_fini_cpu, NULL);

	/*
	 * Establish the softint at the highest softint priority level.
	 * Must happen after CPU detection.
	 */
	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    &entropy_softintr, NULL);
	if (entropy_sih == NULL)
		panic("unable to establish entropy softint");

	/*
	 * Create the entropy housekeeping thread.  Must happen after
	 * lwpinit.
	 */
421 */ 422 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL, 423 entropy_thread, NULL, &entropy_lwp, "entbutler"); 424 if (error) 425 panic("unable to create entropy housekeeping thread: %d", 426 error); 427 428 /* 429 * Wait until the per-CPU initialization has hit all CPUs 430 * before proceeding to mark the entropy system hot. 431 */ 432 xc_barrier(XC_HIGHPRI); 433 E->stage = ENTROPY_HOT; 434 } 435 436 /* 437 * entropy_init_cpu(ptr, cookie, ci) 438 * 439 * percpu(9) constructor for per-CPU entropy pool. 440 */ 441 static void 442 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci) 443 { 444 struct entropy_cpu *ec = ptr; 445 446 ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt), 447 KM_SLEEP); 448 ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP); 449 ec->ec_pending = 0; 450 ec->ec_locked = false; 451 452 evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL, 453 ci->ci_cpuname, "entropy softint"); 454 } 455 456 /* 457 * entropy_fini_cpu(ptr, cookie, ci) 458 * 459 * percpu(9) destructor for per-CPU entropy pool. 460 */ 461 static void 462 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci) 463 { 464 struct entropy_cpu *ec = ptr; 465 466 /* 467 * Zero any lingering data. Disclosure of the per-CPU pool 468 * shouldn't retroactively affect the security of any keys 469 * generated, because entpool(9) erases whatever we have just 470 * drawn out of any pool, but better safe than sorry. 471 */ 472 explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool)); 473 474 evcnt_detach(ec->ec_softint_evcnt); 475 476 kmem_free(ec->ec_pool, sizeof(*ec->ec_pool)); 477 kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt)); 478 } 479 480 /* 481 * entropy_seed(seed) 482 * 483 * Seed the entropy pool with seed. Meant to be called as early 484 * as possible by the bootloader; may be called before or after 485 * entropy_init. Must be called before system reaches userland. 486 * Must be called in thread or soft interrupt context, not in hard 487 * interrupt context. Must be called at most once. 488 * 489 * Overwrites the seed in place. Caller may then free the memory. 490 */ 491 static void 492 entropy_seed(rndsave_t *seed) 493 { 494 SHA1_CTX ctx; 495 uint8_t digest[SHA1_DIGEST_LENGTH]; 496 bool seeded; 497 498 /* 499 * Verify the checksum. If the checksum fails, take the data 500 * but ignore the entropy estimate -- the file may have been 501 * incompletely written with garbage, which is harmless to add 502 * but may not be as unpredictable as alleged. 503 */ 504 SHA1Init(&ctx); 505 SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy)); 506 SHA1Update(&ctx, seed->data, sizeof(seed->data)); 507 SHA1Final(digest, &ctx); 508 CTASSERT(sizeof(seed->digest) == sizeof(digest)); 509 if (!consttime_memequal(digest, seed->digest, sizeof(digest))) { 510 printf("entropy: invalid seed checksum\n"); 511 seed->entropy = 0; 512 } 513 explicit_memset(&ctx, 0, sizeof ctx); 514 explicit_memset(digest, 0, sizeof digest); 515 516 /* 517 * If the entropy is insensibly large, try byte-swapping. 518 * Otherwise assume the file is corrupted and act as though it 519 * has zero entropy. 520 */ 521 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) { 522 seed->entropy = bswap32(seed->entropy); 523 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) 524 seed->entropy = 0; 525 } 526 527 /* Make sure the seed source is attached. */ 528 attach_seed_rndsource(); 529 530 /* Test and set E->seeded. 
	if (E->stage >= ENTROPY_WARM)
		mutex_enter(&E->lock);
	seeded = E->seeded;
	E->seeded = (seed->entropy > 0);
	if (E->stage >= ENTROPY_WARM)
		mutex_exit(&E->lock);

	/*
	 * If we've been seeded, may be re-entering the same seed
	 * (e.g., bootloader vs module init, or something).  No harm in
	 * entering it twice, but it contributes no additional entropy.
	 */
	if (seeded) {
		printf("entropy: double-seeded by bootloader\n");
		seed->entropy = 0;
	} else {
		printf("entropy: entering seed from bootloader"
		    " with %u bits of entropy\n", (unsigned)seed->entropy);
	}

	/* Enter it into the pool and promptly zero it.  */
	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
	    seed->entropy);
	explicit_memset(seed, 0, sizeof(*seed));
}

/*
 * entropy_bootrequest()
 *
 *	Request entropy from all sources at boot, once config is
 *	complete and interrupts are running.
 */
void
entropy_bootrequest(void)
{

	KASSERT(E->stage >= ENTROPY_WARM);

	/*
	 * Request enough to satisfy the maximum entropy shortage.
	 * This is harmless overkill if the bootloader provided a seed.
	 */
	mutex_enter(&E->lock);
	entropy_request(ENTROPY_CAPACITY);
	mutex_exit(&E->lock);
}

/*
 * entropy_epoch()
 *
 *	Returns the current entropy epoch.  If this changes, you should
 *	reseed.  If -1, means system entropy has not yet reached full
 *	entropy or been explicitly consolidated; never reverts back to
 *	-1.  Never zero, so you can always use zero as an uninitialized
 *	sentinel value meaning `reseed ASAP'.
 *
 *	Usage model:
 *
 *		struct foo {
 *			struct crypto_prng prng;
 *			unsigned epoch;
 *		} *foo;
 *
 *		unsigned epoch = entropy_epoch();
 *		if (__predict_false(epoch != foo->epoch)) {
 *			uint8_t seed[32];
 *			if (entropy_extract(seed, sizeof seed, 0) != 0)
 *				warn("no entropy");
 *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
 *			foo->epoch = epoch;
 *		}
 */
unsigned
entropy_epoch(void)
{

	/*
	 * Unsigned int, so no need for seqlock for an atomic read, but
	 * make sure we read it afresh each time.
	 */
	return atomic_load_relaxed(&E->epoch);
}

/*
 * entropy_ready()
 *
 *	True if the entropy pool has full entropy.
 */
bool
entropy_ready(void)
{

	return atomic_load_relaxed(&E->needed) == 0;
}

/*
 * entropy_account_cpu(ec)
 *
 *	Consider whether to consolidate entropy into the global pool
 *	after we just added some into the current CPU's pending pool.
 *
 *	- If this CPU can provide enough entropy now, do so.
 *
 *	- If this and whatever else is available on other CPUs can
 *	  provide enough entropy, kick the consolidation thread.
 *
 *	- Otherwise, do as little as possible, except maybe consolidate
 *	  entropy at most once a minute.
 *
 *	Caller must be bound to a CPU and therefore have exclusive
 *	access to ec.  Will acquire and release the global lock.
 */
static void
entropy_account_cpu(struct entropy_cpu *ec)
{
	unsigned diff;

	KASSERT(E->stage == ENTROPY_HOT);

	/*
	 * If there's no entropy needed, and entropy has been
	 * consolidated in the last minute, do nothing.
	 */
653 */ 654 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 655 __predict_true(!atomic_load_relaxed(&entropy_depletion)) && 656 __predict_true((time_uptime - E->timestamp) <= 60)) 657 return; 658 659 /* If there's nothing pending, stop here. */ 660 if (ec->ec_pending == 0) 661 return; 662 663 /* Consider consolidation, under the lock. */ 664 mutex_enter(&E->lock); 665 if (E->needed != 0 && E->needed <= ec->ec_pending) { 666 /* 667 * If we have not yet attained full entropy but we can 668 * now, do so. This way we disseminate entropy 669 * promptly when it becomes available early at boot; 670 * otherwise we leave it to the entropy consolidation 671 * thread, which is rate-limited to mitigate side 672 * channels and abuse. 673 */ 674 uint8_t buf[ENTPOOL_CAPACITY]; 675 676 /* Transfer from the local pool to the global pool. */ 677 entpool_extract(ec->ec_pool, buf, sizeof buf); 678 entpool_enter(&E->pool, buf, sizeof buf); 679 atomic_store_relaxed(&ec->ec_pending, 0); 680 atomic_store_relaxed(&E->needed, 0); 681 682 /* Notify waiters that we now have full entropy. */ 683 entropy_notify(); 684 entropy_immediate_evcnt.ev_count++; 685 } else { 686 /* Record how much we can add to the global pool. */ 687 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending); 688 E->pending += diff; 689 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff); 690 691 /* 692 * This should have made a difference unless we were 693 * already saturated. 694 */ 695 KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY); 696 KASSERT(E->pending); 697 698 if (E->needed <= E->pending) { 699 /* 700 * Enough entropy between all the per-CPU 701 * pools. Wake up the housekeeping thread. 702 * 703 * If we don't need any entropy, this doesn't 704 * mean much, but it is the only time we ever 705 * gather additional entropy in case the 706 * accounting has been overly optimistic. This 707 * happens at most once a minute, so there's 708 * negligible performance cost. 709 */ 710 E->consolidate = true; 711 cv_broadcast(&E->cv); 712 if (E->needed == 0) 713 entropy_discretionary_evcnt.ev_count++; 714 } else { 715 /* Can't get full entropy. Keep gathering. */ 716 entropy_partial_evcnt.ev_count++; 717 } 718 } 719 mutex_exit(&E->lock); 720 } 721 722 /* 723 * entropy_enter_early(buf, len, nbits) 724 * 725 * Do entropy bookkeeping globally, before we have established 726 * per-CPU pools. Enter directly into the global pool in the hope 727 * that we enter enough before the first entropy_extract to thwart 728 * iterative-guessing attacks; entropy_extract will warn if not. 729 */ 730 static void 731 entropy_enter_early(const void *buf, size_t len, unsigned nbits) 732 { 733 bool notify = false; 734 735 if (E->stage >= ENTROPY_WARM) 736 mutex_enter(&E->lock); 737 738 /* Enter it into the pool. */ 739 entpool_enter(&E->pool, buf, len); 740 741 /* 742 * Decide whether to notify reseed -- we will do so if either: 743 * (a) we transition from partial entropy to full entropy, or 744 * (b) we get a batch of full entropy all at once. 745 */ 746 notify |= (E->needed && E->needed <= nbits); 747 notify |= (nbits >= ENTROPY_CAPACITY*NBBY); 748 749 /* Subtract from the needed count and notify if appropriate. 
	E->needed -= MIN(E->needed, nbits);
	if (notify) {
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	}

	if (E->stage >= ENTROPY_WARM)
		mutex_exit(&E->lock);
}

/*
 * entropy_enter(buf, len, nbits)
 *
 *	Enter len bytes of data from buf into the system's entropy
 *	pool, stirring as necessary when the internal buffer fills up.
 *	nbits is a lower bound on the number of bits of entropy in the
 *	process that led to this sample.
 */
static void
entropy_enter(const void *buf, size_t len, unsigned nbits)
{
	struct entropy_cpu *ec;
	uint32_t pending;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "use entropy_enter_intr from interrupt context");
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/* If it's too early after boot, just use entropy_enter_early.  */
	if (__predict_false(E->stage < ENTROPY_HOT)) {
		entropy_enter_early(buf, len, nbits);
		return;
	}

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Enter into the per-CPU pool.  */
	entpool_enter(ec->ec_pool, buf, len);

	/* Count up what we can add.  */
	pending = ec->ec_pending;
	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
	atomic_store_relaxed(&ec->ec_pending, pending);

	/* Consolidate globally if appropriate based on what we added.  */
	entropy_account_cpu(ec);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}

/*
 * entropy_enter_intr(buf, len, nbits)
 *
 *	Enter up to len bytes of data from buf into the system's
 *	entropy pool without stirring.  nbits is a lower bound on the
 *	number of bits of entropy in the process that led to this
 *	sample.  If the sample could be entered completely, assume
 *	nbits of entropy pending; otherwise assume none, since we don't
 *	know whether some parts of the sample are constant, for
 *	instance.  Schedule a softint to stir the entropy pool if
 *	needed.  Return true if used fully, false if truncated at all.
 *
 *	Using this in thread context will work, but you might as well
 *	use entropy_enter in that case.
 */
static bool
entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
{
	struct entropy_cpu *ec;
	bool fullyused = false;
	uint32_t pending;

	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/* If it's too early after boot, just use entropy_enter_early.  */
	if (__predict_false(E->stage < ENTROPY_HOT)) {
		entropy_enter_early(buf, len, nbits);
		return true;
	}

	/*
	 * Acquire the per-CPU state.  If someone is in the middle of
	 * using it, drop the sample.  Otherwise, take the lock so that
	 * higher-priority interrupts will drop their samples.
	 */
	ec = percpu_getref(entropy_percpu);
	if (ec->ec_locked)
		goto out0;
	ec->ec_locked = true;
	__insn_barrier();

	/*
	 * Enter as much as we can into the per-CPU pool.  If it was
	 * truncated, schedule a softint to stir the pool and stop.
	 */
860 */ 861 if (!entpool_enter_nostir(ec->ec_pool, buf, len)) { 862 softint_schedule(entropy_sih); 863 goto out1; 864 } 865 fullyused = true; 866 867 /* Count up what we can contribute. */ 868 pending = ec->ec_pending; 869 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 870 atomic_store_relaxed(&ec->ec_pending, pending); 871 872 /* Schedule a softint if we added anything and it matters. */ 873 if (__predict_false((atomic_load_relaxed(&E->needed) != 0) || 874 atomic_load_relaxed(&entropy_depletion)) && 875 nbits != 0) 876 softint_schedule(entropy_sih); 877 878 out1: /* Release the per-CPU state. */ 879 KASSERT(ec->ec_locked); 880 __insn_barrier(); 881 ec->ec_locked = false; 882 out0: percpu_putref(entropy_percpu); 883 884 return fullyused; 885 } 886 887 /* 888 * entropy_softintr(cookie) 889 * 890 * Soft interrupt handler for entering entropy. Takes care of 891 * stirring the local CPU's entropy pool if it filled up during 892 * hard interrupts, and promptly crediting entropy from the local 893 * CPU's entropy pool to the global entropy pool if needed. 894 */ 895 static void 896 entropy_softintr(void *cookie) 897 { 898 struct entropy_cpu *ec; 899 900 /* 901 * Acquire the per-CPU state. Other users can lock this only 902 * while soft interrupts are blocked. Cause hard interrupts to 903 * drop samples on the floor. 904 */ 905 ec = percpu_getref(entropy_percpu); 906 KASSERT(!ec->ec_locked); 907 ec->ec_locked = true; 908 __insn_barrier(); 909 910 /* Count statistics. */ 911 ec->ec_softint_evcnt->ev_count++; 912 913 /* Stir the pool if necessary. */ 914 entpool_stir(ec->ec_pool); 915 916 /* Consolidate globally if appropriate based on what we added. */ 917 entropy_account_cpu(ec); 918 919 /* Release the per-CPU state. */ 920 KASSERT(ec->ec_locked); 921 __insn_barrier(); 922 ec->ec_locked = false; 923 percpu_putref(entropy_percpu); 924 } 925 926 /* 927 * entropy_thread(cookie) 928 * 929 * Handle any asynchronous entropy housekeeping. 930 */ 931 static void 932 entropy_thread(void *cookie) 933 { 934 bool consolidate; 935 936 for (;;) { 937 /* 938 * Wait until there's full entropy somewhere among the 939 * CPUs, as confirmed at most once per minute, or 940 * someone wants to consolidate. 941 */ 942 if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) { 943 consolidate = true; 944 } else { 945 mutex_enter(&E->lock); 946 if (!E->consolidate) 947 cv_timedwait(&E->cv, &E->lock, 60*hz); 948 consolidate = E->consolidate; 949 E->consolidate = false; 950 mutex_exit(&E->lock); 951 } 952 953 if (consolidate) { 954 /* Do it. */ 955 entropy_do_consolidate(); 956 957 /* Mitigate abuse. */ 958 kpause("entropy", false, hz, NULL); 959 } 960 } 961 } 962 963 /* 964 * entropy_pending() 965 * 966 * Count up the amount of entropy pending on other CPUs. 967 */ 968 static uint32_t 969 entropy_pending(void) 970 { 971 uint32_t pending = 0; 972 973 percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending); 974 return pending; 975 } 976 977 static void 978 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci) 979 { 980 struct entropy_cpu *ec = ptr; 981 uint32_t *pendingp = cookie; 982 uint32_t cpu_pending; 983 984 cpu_pending = atomic_load_relaxed(&ec->ec_pending); 985 *pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending); 986 } 987 988 /* 989 * entropy_do_consolidate() 990 * 991 * Issue a cross-call to gather entropy on all CPUs and advance 992 * the entropy epoch. 
993 */ 994 static void 995 entropy_do_consolidate(void) 996 { 997 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 998 static struct timeval lasttime; /* serialized by E->lock */ 999 struct entpool pool; 1000 uint8_t buf[ENTPOOL_CAPACITY]; 1001 unsigned diff; 1002 uint64_t ticket; 1003 1004 /* Gather entropy on all CPUs into a temporary pool. */ 1005 memset(&pool, 0, sizeof pool); 1006 ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL); 1007 xc_wait(ticket); 1008 1009 /* Acquire the lock to notify waiters. */ 1010 mutex_enter(&E->lock); 1011 1012 /* Count another consolidation. */ 1013 entropy_consolidate_evcnt.ev_count++; 1014 1015 /* Note when we last consolidated, i.e. now. */ 1016 E->timestamp = time_uptime; 1017 1018 /* Mix what we gathered into the global pool. */ 1019 entpool_extract(&pool, buf, sizeof buf); 1020 entpool_enter(&E->pool, buf, sizeof buf); 1021 explicit_memset(&pool, 0, sizeof pool); 1022 1023 /* Count the entropy that was gathered. */ 1024 diff = MIN(E->needed, E->pending); 1025 atomic_store_relaxed(&E->needed, E->needed - diff); 1026 E->pending -= diff; 1027 if (__predict_false(E->needed > 0)) { 1028 if (ratecheck(&lasttime, &interval)) 1029 printf("entropy: WARNING:" 1030 " consolidating less than full entropy\n"); 1031 } 1032 1033 /* Advance the epoch and notify waiters. */ 1034 entropy_notify(); 1035 1036 /* Release the lock. */ 1037 mutex_exit(&E->lock); 1038 } 1039 1040 /* 1041 * entropy_consolidate_xc(vpool, arg2) 1042 * 1043 * Extract output from the local CPU's input pool and enter it 1044 * into a temporary pool passed as vpool. 1045 */ 1046 static void 1047 entropy_consolidate_xc(void *vpool, void *arg2 __unused) 1048 { 1049 struct entpool *pool = vpool; 1050 struct entropy_cpu *ec; 1051 uint8_t buf[ENTPOOL_CAPACITY]; 1052 uint32_t extra[7]; 1053 unsigned i = 0; 1054 int s; 1055 1056 /* Grab CPU number and cycle counter to mix extra into the pool. */ 1057 extra[i++] = cpu_number(); 1058 extra[i++] = entropy_timer(); 1059 1060 /* 1061 * Acquire the per-CPU state, blocking soft interrupts and 1062 * discarding entropy in hard interrupts, so that we can 1063 * extract from the per-CPU pool. 1064 */ 1065 ec = percpu_getref(entropy_percpu); 1066 s = splsoftserial(); 1067 KASSERT(!ec->ec_locked); 1068 ec->ec_locked = true; 1069 __insn_barrier(); 1070 extra[i++] = entropy_timer(); 1071 1072 /* Extract the data and count it no longer pending. */ 1073 entpool_extract(ec->ec_pool, buf, sizeof buf); 1074 atomic_store_relaxed(&ec->ec_pending, 0); 1075 extra[i++] = entropy_timer(); 1076 1077 /* Release the per-CPU state. */ 1078 KASSERT(ec->ec_locked); 1079 __insn_barrier(); 1080 ec->ec_locked = false; 1081 splx(s); 1082 percpu_putref(entropy_percpu); 1083 extra[i++] = entropy_timer(); 1084 1085 /* 1086 * Copy over statistics, and enter the per-CPU extract and the 1087 * extra timing into the temporary pool, under the global lock. 1088 */ 1089 mutex_enter(&E->lock); 1090 extra[i++] = entropy_timer(); 1091 entpool_enter(pool, buf, sizeof buf); 1092 explicit_memset(buf, 0, sizeof buf); 1093 extra[i++] = entropy_timer(); 1094 KASSERT(i == __arraycount(extra)); 1095 entpool_enter(pool, extra, sizeof extra); 1096 explicit_memset(extra, 0, sizeof extra); 1097 mutex_exit(&E->lock); 1098 } 1099 1100 /* 1101 * entropy_notify() 1102 * 1103 * Caller just contributed entropy to the global pool. Advance 1104 * the entropy epoch and notify waiters. 1105 * 1106 * Caller must hold the global entropy lock. 
 *	Except for the `sysctl -w kern.entropy.consolidate=1` trigger,
 *	the caller must have just transitioned from partial entropy to
 *	full entropy -- E->needed should be zero now.
 */
static void
entropy_notify(void)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	unsigned epoch;

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	/*
	 * If this is the first time, print a message to the console
	 * that we're ready so operators can compare it to the timing
	 * of other events.
	 */
	if (__predict_false(!rnd_initial_entropy) && E->needed == 0) {
		printf("entropy: ready\n");
		rnd_initial_entropy = 1;
	}

	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
	    ratecheck(&lasttime, &interval)) {
		epoch = E->epoch + 1;
		if (epoch == 0 || epoch == (unsigned)-1)
			epoch = 1;
		atomic_store_relaxed(&E->epoch, epoch);
	}

	/* Notify waiters.  */
	if (E->stage >= ENTROPY_WARM) {
		cv_broadcast(&E->cv);
		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
	}

	/* Count another notification.  */
	entropy_notify_evcnt.ev_count++;
}

/*
 * entropy_consolidate()
 *
 *	Trigger entropy consolidation and wait for it to complete.
 *
 *	This should be used sparingly, not periodically -- requiring
 *	conscious intervention by the operator or a clear policy
 *	decision.  Otherwise, the kernel will automatically consolidate
 *	when enough entropy has been gathered into per-CPU pools to
 *	transition to full entropy.
 */
void
entropy_consolidate(void)
{
	uint64_t ticket;
	int error;

	KASSERT(E->stage == ENTROPY_HOT);

	mutex_enter(&E->lock);
	ticket = entropy_consolidate_evcnt.ev_count;
	E->consolidate = true;
	cv_broadcast(&E->cv);
	while (ticket == entropy_consolidate_evcnt.ev_count) {
		error = cv_wait_sig(&E->cv, &E->lock);
		if (error)
			break;
	}
	mutex_exit(&E->lock);
}

/*
 * sysctl -w kern.entropy.consolidate=1
 *
 *	Trigger entropy consolidation and wait for it to complete.
 *	Writable only by superuser.  This, writing to /dev/random, and
 *	ioctl(RNDADDDATA) are the only ways for the system to
 *	consolidate entropy if the operator knows something the kernel
 *	doesn't about how unpredictable the pending entropy pools are.
 */
static int
sysctl_entropy_consolidate(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int arg;
	int error;

	KASSERT(E->stage == ENTROPY_HOT);

	node.sysctl_data = &arg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (arg)
		entropy_consolidate();

	return error;
}

/*
 * sysctl -w kern.entropy.gather=1
 *
 *	Trigger gathering entropy from all on-demand sources, and wait
 *	for synchronous sources (but not asynchronous sources) to
 *	complete.  Writable only by superuser.
 */
1214 */ 1215 static int 1216 sysctl_entropy_gather(SYSCTLFN_ARGS) 1217 { 1218 struct sysctlnode node = *rnode; 1219 int arg; 1220 int error; 1221 1222 KASSERT(E->stage == ENTROPY_HOT); 1223 1224 node.sysctl_data = &arg; 1225 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1226 if (error || newp == NULL) 1227 return error; 1228 if (arg) { 1229 mutex_enter(&E->lock); 1230 entropy_request(ENTROPY_CAPACITY); 1231 mutex_exit(&E->lock); 1232 } 1233 1234 return 0; 1235 } 1236 1237 /* 1238 * entropy_extract(buf, len, flags) 1239 * 1240 * Extract len bytes from the global entropy pool into buf. 1241 * 1242 * Flags may have: 1243 * 1244 * ENTROPY_WAIT Wait for entropy if not available yet. 1245 * ENTROPY_SIG Allow interruption by a signal during wait. 1246 * ENTROPY_HARDFAIL Either fill the buffer with full entropy, 1247 * or fail without filling it at all. 1248 * 1249 * Return zero on success, or error on failure: 1250 * 1251 * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 1252 * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 1253 * 1254 * If ENTROPY_WAIT is set, allowed only in thread context. If 1255 * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's 1256 * awfully high... Do we really need it in hard interrupts? This 1257 * arises from use of cprng_strong(9).) 1258 */ 1259 int 1260 entropy_extract(void *buf, size_t len, int flags) 1261 { 1262 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1263 static struct timeval lasttime; /* serialized by E->lock */ 1264 int error; 1265 1266 if (ISSET(flags, ENTROPY_WAIT)) { 1267 ASSERT_SLEEPABLE(); 1268 KASSERTMSG(E->stage >= ENTROPY_WARM, 1269 "can't wait for entropy until warm"); 1270 } 1271 1272 /* Acquire the global lock to get at the global pool. */ 1273 if (E->stage >= ENTROPY_WARM) 1274 mutex_enter(&E->lock); 1275 1276 /* Count up request for entropy in interrupt context. */ 1277 if (cpu_intr_p()) 1278 entropy_extract_intr_evcnt.ev_count++; 1279 1280 /* Wait until there is enough entropy in the system. */ 1281 error = 0; 1282 while (E->needed) { 1283 /* Ask for more, synchronously if possible. */ 1284 entropy_request(len); 1285 1286 /* If we got enough, we're done. */ 1287 if (E->needed == 0) { 1288 KASSERT(error == 0); 1289 break; 1290 } 1291 1292 /* If not waiting, stop here. */ 1293 if (!ISSET(flags, ENTROPY_WAIT)) { 1294 error = EWOULDBLOCK; 1295 break; 1296 } 1297 1298 /* Wait for some entropy to come in and try again. */ 1299 KASSERT(E->stage >= ENTROPY_WARM); 1300 printf("entropy: pid %d (%s) blocking due to lack of entropy\n", 1301 curproc->p_pid, curproc->p_comm); 1302 1303 if (ISSET(flags, ENTROPY_SIG)) { 1304 error = cv_wait_sig(&E->cv, &E->lock); 1305 if (error) 1306 break; 1307 } else { 1308 cv_wait(&E->cv, &E->lock); 1309 } 1310 } 1311 1312 /* 1313 * Count failure -- but fill the buffer nevertheless, unless 1314 * the caller specified ENTROPY_HARDFAIL. 1315 */ 1316 if (error) { 1317 if (ISSET(flags, ENTROPY_HARDFAIL)) 1318 goto out; 1319 entropy_extract_fail_evcnt.ev_count++; 1320 } 1321 1322 /* 1323 * Report a warning if we have never yet reached full entropy. 1324 * This is the only case where we consider entropy to be 1325 * `depleted' without kern.entropy.depletion enabled -- when we 1326 * only have partial entropy, an adversary may be able to 1327 * narrow the state of the pool down to a small number of 1328 * possibilities; the output then enables them to confirm a 1329 * guess, reducing its entropy from the adversary's perspective 1330 * to zero. 
1331 */ 1332 if (__predict_false(E->epoch == (unsigned)-1)) { 1333 if (ratecheck(&lasttime, &interval)) 1334 printf("entropy: WARNING:" 1335 " extracting entropy too early\n"); 1336 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY); 1337 } 1338 1339 /* Extract data from the pool, and `deplete' if we're doing that. */ 1340 entpool_extract(&E->pool, buf, len); 1341 if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 1342 error == 0) { 1343 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 1344 1345 atomic_store_relaxed(&E->needed, 1346 E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost)); 1347 entropy_deplete_evcnt.ev_count++; 1348 } 1349 1350 out: /* Release the global lock and return the error. */ 1351 if (E->stage >= ENTROPY_WARM) 1352 mutex_exit(&E->lock); 1353 return error; 1354 } 1355 1356 /* 1357 * entropy_poll(events) 1358 * 1359 * Return the subset of events ready, and if it is not all of 1360 * events, record curlwp as waiting for entropy. 1361 */ 1362 int 1363 entropy_poll(int events) 1364 { 1365 int revents = 0; 1366 1367 KASSERT(E->stage >= ENTROPY_WARM); 1368 1369 /* Always ready for writing. */ 1370 revents |= events & (POLLOUT|POLLWRNORM); 1371 1372 /* Narrow it down to reads. */ 1373 events &= POLLIN|POLLRDNORM; 1374 if (events == 0) 1375 return revents; 1376 1377 /* 1378 * If we have reached full entropy and we're not depleting 1379 * entropy, we are forever ready. 1380 */ 1381 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 1382 __predict_true(!atomic_load_relaxed(&entropy_depletion))) 1383 return revents | events; 1384 1385 /* 1386 * Otherwise, check whether we need entropy under the lock. If 1387 * we don't, we're ready; if we do, add ourselves to the queue. 1388 */ 1389 mutex_enter(&E->lock); 1390 if (E->needed == 0) 1391 revents |= events; 1392 else 1393 selrecord(curlwp, &E->selq); 1394 mutex_exit(&E->lock); 1395 1396 return revents; 1397 } 1398 1399 /* 1400 * filt_entropy_read_detach(kn) 1401 * 1402 * struct filterops::f_detach callback for entropy read events: 1403 * remove kn from the list of waiters. 1404 */ 1405 static void 1406 filt_entropy_read_detach(struct knote *kn) 1407 { 1408 1409 KASSERT(E->stage >= ENTROPY_WARM); 1410 1411 mutex_enter(&E->lock); 1412 SLIST_REMOVE(&E->selq.sel_klist, kn, knote, kn_selnext); 1413 mutex_exit(&E->lock); 1414 } 1415 1416 /* 1417 * filt_entropy_read_event(kn, hint) 1418 * 1419 * struct filterops::f_event callback for entropy read events: 1420 * poll for entropy. Caller must hold the global entropy lock if 1421 * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 1422 */ 1423 static int 1424 filt_entropy_read_event(struct knote *kn, long hint) 1425 { 1426 int ret; 1427 1428 KASSERT(E->stage >= ENTROPY_WARM); 1429 1430 /* Acquire the lock, if caller is outside entropy subsystem. */ 1431 if (hint == NOTE_SUBMIT) 1432 KASSERT(mutex_owned(&E->lock)); 1433 else 1434 mutex_enter(&E->lock); 1435 1436 /* 1437 * If we still need entropy, can't read anything; if not, can 1438 * read arbitrarily much. 1439 */ 1440 if (E->needed != 0) { 1441 ret = 0; 1442 } else { 1443 if (atomic_load_relaxed(&entropy_depletion)) 1444 kn->kn_data = ENTROPY_CAPACITY*NBBY; 1445 else 1446 kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 1447 ret = 1; 1448 } 1449 1450 /* Release the lock, if caller is outside entropy subsystem. 
	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&E->lock));
	else
		mutex_exit(&E->lock);

	return ret;
}

static const struct filterops entropy_read_filtops = {
	.f_isfd = 1,		/* XXX Makes sense only for /dev/u?random.  */
	.f_attach = NULL,
	.f_detach = filt_entropy_read_detach,
	.f_event = filt_entropy_read_event,
};

/*
 * entropy_kqfilter(kn)
 *
 *	Register kn to receive entropy event notifications.  May be
 *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
 */
int
entropy_kqfilter(struct knote *kn)
{

	KASSERT(E->stage >= ENTROPY_WARM);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* Enter into the global select queue.  */
		mutex_enter(&E->lock);
		kn->kn_fop = &entropy_read_filtops;
		SLIST_INSERT_HEAD(&E->selq.sel_klist, kn, kn_selnext);
		mutex_exit(&E->lock);
		return 0;
	case EVFILT_WRITE:
		/* Can always dump entropy into the system.  */
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return EINVAL;
	}
}

/*
 * rndsource_setcb(rs, get, getarg)
 *
 *	Set the request callback for the entropy source rs, if it can
 *	provide entropy on demand.  Must precede rnd_attach_source.
 */
void
rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
    void *getarg)
{

	rs->get = get;
	rs->getarg = getarg;
}

/*
 * rnd_attach_source(rs, name, type, flags)
 *
 *	Attach the entropy source rs.  Must be done after
 *	rndsource_setcb, if any, and before any calls to rnd_add_data.
 */
void
rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
    uint32_t flags)
{
	uint32_t extra[4];
	unsigned i = 0;

	/* Grab cycle counter to mix extra into the pool.  */
	extra[i++] = entropy_timer();

	/*
	 * Apply some standard flags:
	 *
	 *	- We do not bother with network devices by default, for
	 *	  hysterical raisins (perhaps: because it is often the case
	 *	  that an adversary can influence network packet timings).
	 */
	switch (type) {
	case RND_TYPE_NET:
		flags |= RND_FLAG_NO_COLLECT;
		break;
	}

	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);

	/* Initialize the random source.  */
	memset(rs->name, 0, sizeof(rs->name));	/* paranoia */
	strlcpy(rs->name, name, sizeof(rs->name));
	rs->total = 0;
	rs->type = type;
	rs->flags = flags;
	if (E->stage >= ENTROPY_WARM)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
	extra[i++] = entropy_timer();

	/* Wire it into the global list of random sources.  */
	if (E->stage >= ENTROPY_WARM)
		mutex_enter(&E->lock);
	LIST_INSERT_HEAD(&E->sources, rs, list);
	if (E->stage >= ENTROPY_WARM)
		mutex_exit(&E->lock);
	extra[i++] = entropy_timer();

	/* Request that it provide entropy ASAP, if we can.  */
	if (ISSET(flags, RND_FLAG_HASCB))
		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
	extra[i++] = entropy_timer();

	/* Mix the extra into the pool.  */
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, 0);
	explicit_memset(extra, 0, sizeof extra);
}

/*
 * rnd_detach_source(rs)
 *
 *	Detach the entropy source rs.  May sleep waiting for users to
 *	drain.
 *	Further use is not allowed.
 */
void
rnd_detach_source(struct krndsource *rs)
{

	/*
	 * If we're cold (shouldn't happen, but hey), just remove it
	 * from the list -- there's nothing allocated.
	 */
	if (E->stage == ENTROPY_COLD) {
		LIST_REMOVE(rs, list);
		return;
	}

	/* We may have to wait for entropy_request.  */
	ASSERT_SLEEPABLE();

	/* Wait until the source list is not in use, and remove it.  */
	mutex_enter(&E->lock);
	while (E->sourcelock)
		cv_wait(&E->cv, &E->lock);
	LIST_REMOVE(rs, list);
	mutex_exit(&E->lock);

	/* Free the per-CPU data.  */
	percpu_free(rs->state, sizeof(struct rndsource_cpu));
}

/*
 * rnd_lock_sources()
 *
 *	Prevent changes to the list of rndsources while we iterate it.
 *	Interruptible.  Caller must hold the global entropy lock.  If
 *	successful, no rndsource will go away until rnd_unlock_sources
 *	even while the caller releases the global entropy lock.
 */
static int
rnd_lock_sources(void)
{
	int error;

	KASSERT(mutex_owned(&E->lock));

	while (E->sourcelock) {
		error = cv_wait_sig(&E->cv, &E->lock);
		if (error)
			return error;
	}

	E->sourcelock = curlwp;
	return 0;
}

/*
 * rnd_trylock_sources()
 *
 *	Try to lock the list of sources, but if it's already locked,
 *	fail.  Caller must hold the global entropy lock.  If
 *	successful, no rndsource will go away until rnd_unlock_sources
 *	even while the caller releases the global entropy lock.
 */
static bool
rnd_trylock_sources(void)
{

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	if (E->sourcelock)
		return false;
	E->sourcelock = curlwp;
	return true;
}

/*
 * rnd_unlock_sources()
 *
 *	Unlock the list of sources after rnd_lock_sources or
 *	rnd_trylock_sources.  Caller must hold the global entropy lock.
 */
static void
rnd_unlock_sources(void)
{

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
	    curlwp, E->sourcelock);
	E->sourcelock = NULL;
	if (E->stage >= ENTROPY_WARM)
		cv_broadcast(&E->cv);
}

/*
 * rnd_sources_locked()
 *
 *	True if we hold the list of rndsources locked, for diagnostic
 *	assertions.
 */
static bool __diagused
rnd_sources_locked(void)
{

	return E->sourcelock == curlwp;
}

/*
 * entropy_request(nbytes)
 *
 *	Request nbytes bytes of entropy from all sources in the system.
 *	OK if we overdo it.  Caller must hold the global entropy lock;
 *	will release and re-acquire it.
 */
static void
entropy_request(size_t nbytes)
{
	struct krndsource *rs;

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	/*
	 * If there is a request in progress, let it proceed.
	 * Otherwise, note that a request is in progress to avoid
	 * reentry and to block rnd_detach_source until we're done.
	 */
	if (!rnd_trylock_sources())
		return;
	entropy_request_evcnt.ev_count++;

	/* Clamp to the maximum reasonable request.  */
	nbytes = MIN(nbytes, ENTROPY_CAPACITY);

	/* Walk the list of sources.  */
	LIST_FOREACH(rs, &E->sources, list) {
		/* Skip sources without callbacks.  */
		if (!ISSET(rs->flags, RND_FLAG_HASCB))
			continue;

		/*
		 * Skip sources that are disabled altogether -- we
		 * would just ignore their samples anyway.
		 */
		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
			continue;

		/* Drop the lock while we call the callback.  */
		if (E->stage >= ENTROPY_WARM)
			mutex_exit(&E->lock);
		(*rs->get)(nbytes, rs->getarg);
		if (E->stage >= ENTROPY_WARM)
			mutex_enter(&E->lock);
	}

	/* Notify rnd_detach_source that the request is done.  */
	rnd_unlock_sources();
}

/*
 * rnd_add_uint32(rs, value)
 *
 *	Enter 32 bits of data from an entropy source into the pool.
 *
 *	If rs is NULL, may not be called from interrupt context.
 *
 *	If rs is non-NULL, may be called from any context.  May drop
 *	data if called from interrupt context.
 */
void
rnd_add_uint32(struct krndsource *rs, uint32_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

void
_rnd_add_uint32(struct krndsource *rs, uint32_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

void
_rnd_add_uint64(struct krndsource *rs, uint64_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

/*
 * rnd_add_data(rs, buf, len, entropybits)
 *
 *	Enter data from an entropy source into the pool, with a
 *	driver's estimate of how much entropy the physical source of
 *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
 *	estimate and treat it as zero.
 *
 *	If rs is NULL, may not be called from interrupt context.
 *
 *	If rs is non-NULL, may be called from any context.  May drop
 *	data if called from interrupt context.
 */
void
rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{
	uint32_t extra;
	uint32_t flags;

	KASSERTMSG(howmany(entropybits, NBBY) <= len,
	    "%s: impossible entropy rate:"
	    " %"PRIu32" bits in %"PRIu32"-byte string",
	    rs ? rs->name : "(anonymous)", entropybits, len);

	/* If there's no rndsource, just enter the data and time now.  */
	if (rs == NULL) {
		entropy_enter(buf, len, entropybits);
		extra = entropy_timer();
		entropy_enter(&extra, sizeof extra, 0);
		explicit_memset(&extra, 0, sizeof extra);
		return;
	}

	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
	flags = atomic_load_relaxed(&rs->flags);

	/*
	 * Skip if:
	 * - we're not collecting entropy, or
	 * - the operator doesn't want to collect entropy from this, or
	 * - neither data nor timings are being collected from this.
	 */
	if (!atomic_load_relaxed(&entropy_collection) ||
	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
		return;

	/* If asked, ignore the estimate.  */
	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
		entropybits = 0;

	/* If we are collecting data, enter them.  */
	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
		rnd_add_data_1(rs, buf, len, entropybits);

	/* If we are collecting timings, enter one.  */

/*
 * rnd_add_data_1(rs, buf, len, entropybits)
 *
 *	Internal subroutine to call either entropy_enter_intr, if we're
 *	in interrupt context, or entropy_enter if not, and to count the
 *	entropy in an rndsource.
 */
static void
rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{
	bool fullyused;

	/*
	 * If we're in interrupt context, use entropy_enter_intr and
	 * take note of whether it consumed the full sample; if not,
	 * use entropy_enter, which always consumes the full sample.
	 */
	if (curlwp && cpu_intr_p()) {
		fullyused = entropy_enter_intr(buf, len, entropybits);
	} else {
		entropy_enter(buf, len, entropybits);
		fullyused = true;
	}

	/*
	 * If we used the full sample, note how many bits were
	 * contributed from this source.
	 */
	if (fullyused) {
		if (E->stage < ENTROPY_HOT) {
			if (E->stage >= ENTROPY_WARM)
				mutex_enter(&E->lock);
			rs->total += MIN(UINT_MAX - rs->total, entropybits);
			if (E->stage >= ENTROPY_WARM)
				mutex_exit(&E->lock);
		} else {
			struct rndsource_cpu *rc = percpu_getref(rs->state);
			unsigned nbits = rc->rc_nbits;

			nbits += MIN(UINT_MAX - nbits, entropybits);
			atomic_store_relaxed(&rc->rc_nbits, nbits);
			percpu_putref(rs->state);
		}
	}
}

/*
 * rnd_add_data_sync(rs, buf, len, entropybits)
 *
 *	Same as rnd_add_data.  Originally used in rndsource callbacks,
 *	to break an unnecessary cycle; no longer really needed.
 */
void
rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{

	rnd_add_data(rs, buf, len, entropybits);
}

/*
 * rndsource_entropybits(rs)
 *
 *	Return approximately the number of bits of entropy that have
 *	been contributed via rs so far.  Approximate if other CPUs may
 *	be calling rnd_add_data concurrently.
 */
static unsigned
rndsource_entropybits(struct krndsource *rs)
{
	unsigned nbits = rs->total;

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());
	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
	return nbits;
}

static void
rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	unsigned *nbitsp = cookie;
	unsigned cpu_nbits;

	cpu_nbits = atomic_load_relaxed(&rc->rc_nbits);
	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
}

/*
 * rndsource_to_user(rs, urs)
 *
 *	Copy a description of rs out to urs for userland.
 */
static void
rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urs, 0, sizeof(*urs));

	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
	strlcpy(urs->name, rs->name, sizeof(urs->name));
	urs->total = rndsource_entropybits(rs);
	urs->type = rs->type;
	urs->flags = atomic_load_relaxed(&rs->flags);
}

/*
 * rndsource_to_user_est(rs, urse)
 *
 *	Copy a description of rs and estimation statistics out to urse
 *	for userland.
 */
static void
rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urse, 0, sizeof(*urse));

	/* Copy out the rndsource description.  */
	rndsource_to_user(rs, &urse->rt);

	/* Zero out the statistics because we don't do estimation.  */
	urse->dt_samples = 0;
	urse->dt_total = 0;
	urse->dv_samples = 0;
	urse->dv_total = 0;
}

/*
 * entropy_reset_xc(arg1, arg2)
 *
 *	Reset the current CPU's pending entropy to zero.
 */
static void
entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
{
	uint32_t extra = entropy_timer();
	struct entropy_cpu *ec;
	int s;

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Zero the pending count and enter a cycle count for fun.  */
	ec->ec_pending = 0;
	entpool_enter(ec->ec_pool, &extra, sizeof extra);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}
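
/*
 * Illustrative note: entropy_reset_xc is an xcall(9) handler, not meant
 * to be called directly.  xc_broadcast schedules it on every CPU; a
 * caller that must know all CPUs have finished would wait on the
 * returned ticket, e.g.
 *
 *	xc_wait(xc_broadcast(0, &entropy_reset_xc, NULL, NULL));
 *
 * The RNDCTL handler below broadcasts it without waiting before it
 * zeroes the global pending count.
 */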

/*
 * entropy_ioctl(cmd, data)
 *
 *	Handle various /dev/random ioctl queries.
 */
int
entropy_ioctl(unsigned long cmd, void *data)
{
	struct krndsource *rs;
	bool privileged = false;
	int error;

	KASSERT(E->stage >= ENTROPY_WARM);

	/* Verify user's authorization to perform the ioctl.  */
	switch (cmd) {
	case RNDGETENTCNT:
	case RNDGETPOOLSTAT:
	case RNDGETSRCNUM:
	case RNDGETSRCNAME:
	case RNDGETESTNUM:
	case RNDGETESTNAME:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDCTL:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDADDDATA:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
		/* Ascertain whether the user's inputs should be counted.  */
		if (kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		    NULL, NULL, NULL, NULL) == 0)
			privileged = true;
		break;
	default: {
		/*
		 * XXX Hack to avoid changing module ABI so this can be
		 * pulled up.  Later, we can just remove the argument.
		 */
		static const struct fileops fops = {
			.fo_ioctl = rnd_system_ioctl,
		};
		struct file f = {
			.f_ops = &fops,
		};
		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
		    enosys(), error);
#if defined(_LP64)
		if (error == ENOSYS)
			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
			    enosys(), error);
#endif
		if (error == ENOSYS)
			error = ENOTTY;
		break;
	}
	}

	/* If anything went wrong with authorization, stop here.  */
	if (error)
		return error;

	/* Dispatch on the command.  */
	switch (cmd) {
	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
		uint32_t *countp = data;

		mutex_enter(&E->lock);
		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
		mutex_exit(&E->lock);

		break;
	}
	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
		rndpoolstat_t *pstat = data;

		mutex_enter(&E->lock);

		/* parameters */
		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */

		/* state */
		pstat->added = 0; /* XXX total entropy_enter count */
		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
		pstat->removed = 0; /* XXX total entropy_extract count */
		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
		pstat->generated = 0; /* XXX bits of data...fabricated? */

		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
		rndstat_t *stat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (stat->count == 0)
			break;
		if (stat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == stat->start)
				break;
		}
		while (i < stat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &stat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= stat->count);
		stat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
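	/*
	 * Illustrative sketch, not part of this file: the start/count
	 * paging above is how a userland consumer (rndctl(8), say) can
	 * enumerate sources, roughly:
	 *
	 *	rndstat_t stat = { .start = 0, .count = RND_MAXSTATCOUNT };
	 *
	 *	if (ioctl(fd, RNDGETSRCNUM, &stat) == 0) {
	 *		for (uint32_t i = 0; i < stat.count; i++)
	 *			printf("%s\n", stat.source[i].name);
	 *	}
	 *
	 * where fd is an open descriptor on /dev/urandom and stat.count
	 * comes back rewritten to the number of sources actually copied
	 * out.
	 */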
	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
		rndstat_est_t *estat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (estat->count == 0)
			break;
		if (estat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == estat->start)
				break;
		}
		while (i < estat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &estat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= estat->count);
		estat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
		rndstat_name_t *nstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, nstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &nstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
		rndstat_est_name_t *enstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, enstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &enstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDCTL: {		/* Modify entropy source flags.  */
		rndctl_t *rndctl = data;
		const size_t n = sizeof(rs->name);
		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
		uint32_t flags;
		bool reset = false, request = false;

		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));

		/* Whitelist the flags that the user can change.  */
		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;

		/*
		 * For each matching rndsource, either by type if
		 * specified or by name if not, set the masked flags.
		 */
		mutex_enter(&E->lock);
		LIST_FOREACH(rs, &E->sources, list) {
			if (rndctl->type != 0xff) {
				if (rs->type != rndctl->type)
					continue;
			} else {
				if (strncmp(rs->name, rndctl->name, n) != 0)
					continue;
			}
			flags = rs->flags & ~rndctl->mask;
			flags |= rndctl->flags & rndctl->mask;
			if ((rs->flags & resetflags) == 0 &&
			    (flags & resetflags) != 0)
				reset = true;
			if ((rs->flags ^ flags) & resetflags)
				request = true;
			atomic_store_relaxed(&rs->flags, flags);
		}
		mutex_exit(&E->lock);

		/*
		 * If we disabled estimation or collection, nix all the
		 * pending entropy and set needed to the maximum.
		 */
		if (reset) {
			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
			mutex_enter(&E->lock);
			E->pending = 0;
			atomic_store_relaxed(&E->needed,
			    ENTROPY_CAPACITY*NBBY);
			mutex_exit(&E->lock);
		}

		/*
		 * If we changed any of the estimation or collection
		 * flags, request new samples from everyone -- either
		 * to make up for what we just lost, or to get new
		 * samples from what we just added.
		 */
		if (request) {
			mutex_enter(&E->lock);
			entropy_request(ENTROPY_CAPACITY);
			mutex_exit(&E->lock);
		}
		break;
	}
	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
		rnddata_t *rdata = data;
		unsigned entropybits = 0;

		if (!atomic_load_relaxed(&entropy_collection))
			break;	/* thanks but no thanks */
		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
			return EINVAL;

		/*
		 * This ioctl serves as the userland alternative to a
		 * bootloader-provided seed -- typically furnished by
		 * /etc/rc.d/random_seed.  We accept the user's entropy
		 * claim only if
		 *
		 * (a) the user is privileged, and
		 * (b) we have not entered a bootloader seed.
		 *
		 * under the assumption that the user may use this to
		 * load a seed from disk that we have already loaded
		 * from the bootloader, so we don't double-count it.
		 */
		if (privileged && rdata->entropy && rdata->len) {
			mutex_enter(&E->lock);
			if (!E->seeded) {
				entropybits = MIN(rdata->entropy,
				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
				E->seeded = true;
			}
			mutex_exit(&E->lock);
		}

		/* Enter the data and consolidate entropy.  */
		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
		    entropybits);
		entropy_consolidate();
		break;
	}
	default:
		error = ENOTTY;
	}

	/* Return any error that may have come up.  */
	return error;
}

/* Legacy entry points */

void
rnd_seed(void *seed, size_t len)
{

	if (len != sizeof(rndsave_t)) {
		printf("entropy: invalid seed length: %zu,"
		    " expected sizeof(rndsave_t) = %zu\n",
		    len, sizeof(rndsave_t));
		return;
	}
	entropy_seed(seed);
}

void
rnd_init(void)
{

	entropy_init();
}

void
rnd_init_softint(void)
{

	entropy_init_late();
}

int
rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
{

	return entropy_ioctl(cmd, data);
}
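
/*
 * Illustrative sketch, not part of this file: the RNDADDDATA ioctl
 * handled above is how a seed saved on disk gets fed back in after
 * boot, normally by /etc/rc.d/random_seed via rndctl(8) -L.  A
 * hypothetical userland loader (seedfd, rndfd, and nbits are invented
 * names) would go along these lines:
 *
 *	rnddata_t rd;
 *	ssize_t n;
 *
 *	memset(&rd, 0, sizeof rd);
 *	n = read(seedfd, rd.data, sizeof rd.data);
 *	if (n > 0) {
 *		rd.len = n;
 *		rd.entropy = nbits;	// entropy claimed for the seed
 *		if (ioctl(rndfd, RNDADDDATA, &rd) == -1)
 *			err(1, "RNDADDDATA");
 *	}
 *
 * The kernel counts rd.entropy only for a privileged caller, and only
 * if no bootloader seed has been entered already, as implemented in
 * entropy_ioctl above.
 */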