/*	$NetBSD: kern_entropy.c,v 1.33 2021/09/26 15:10:51 thorpej Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* No entropy estimation based on the sample values, which is a
 *	  contradiction in terms and a potential source of side
 *	  channels.  It is the responsibility of the driver author to
 *	  study how predictable the physical source of input can ever
 *	  be, and to furnish a lower bound on the amount of entropy it
 *	  has.
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.33 2021/09/26 15:10:51 thorpej Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/reboot.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct evcnt	*ec_softint_evcnt;
	struct entpool	*ec_pool;
	unsigned	ec_pending;
	bool		ec_locked;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned	rc_entropybits;
	unsigned	rc_timesamples;
	unsigned	rc_datasamples;
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	needed;		/* (A) needed globally */
	unsigned	pending;	/* (A) pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
	enum entropy_stage {
		ENTROPY_COLD = 0, /* single-threaded */
		ENTROPY_WARM,	  /* multi-threaded at boot before CPUs */
		ENTROPY_HOT,	  /* multi-threaded multi-CPU */
	}		stage;
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded.  */
	.needed = ENTROPY_CAPACITY*NBBY,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
	.stage = ENTROPY_COLD,
};

#define	E	(&entropy_global) /* declutter */

/* Read-mostly globals */
static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
static void		*entropy_sih __read_mostly; /* softint handler */
static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */

int rnd_initial_entropy __read_mostly; /* XXX legacy */

static struct krndsource seed_rndsource __read_mostly;

/*
 * Event counters
 *
 *	Must be careful with adding these because they can serve as
 *	side channels.
 */
static struct evcnt entropy_discretionary_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
static struct evcnt entropy_immediate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
static struct evcnt entropy_partial_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
static struct evcnt entropy_consolidate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
static struct evcnt entropy_extract_intr_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr");
EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt);
static struct evcnt entropy_extract_fail_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
static struct evcnt entropy_request_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
EVCNT_ATTACH_STATIC(entropy_request_evcnt);
static struct evcnt entropy_deplete_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
static struct evcnt entropy_notify_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
EVCNT_ATTACH_STATIC(entropy_notify_evcnt);

/* Sysctl knobs */
static bool	entropy_collection = 1;
static bool	entropy_depletion = 0; /* Silly!  */

static const struct sysctlnode	*entropy_sysctlroot;
static struct sysctllog		*entropy_sysctllog;

/* Forward declarations */
static void	entropy_init_cpu(void *, void *, struct cpu_info *);
static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
static void	entropy_account_cpu(struct entropy_cpu *);
static void	entropy_enter(const void *, size_t, unsigned);
static bool	entropy_enter_intr(const void *, size_t, unsigned);
static void	entropy_softintr(void *);
static void	entropy_thread(void *);
static uint32_t	entropy_pending(void);
static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
static void	entropy_do_consolidate(void);
static void	entropy_consolidate_xc(void *, void *);
static void	entropy_notify(void);
static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
static void	filt_entropy_read_detach(struct knote *);
static int	filt_entropy_read_event(struct knote *, long);
static void	entropy_request(size_t);
static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
		    uint32_t, uint32_t);
static unsigned	rndsource_entropybits(struct krndsource *);
static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
static void	rndsource_to_user(struct krndsource *, rndsource_t *);
static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);

/*
 * entropy_timer()
 *
 *	Cycle counter, time counter, or anything that changes a wee bit
 *	unpredictably.
 */
static inline uint32_t
entropy_timer(void)
{
	struct bintime bt;
	uint32_t v;

	/* If we have a CPU cycle counter, use the low 32 bits.  */
#ifdef __HAVE_CPU_COUNTER
	if (__predict_true(cpu_hascounter()))
		return cpu_counter32();
#endif	/* __HAVE_CPU_COUNTER */

	/* If we're cold, tough.  Can't binuptime while cold.  */
	if (__predict_false(cold))
		return 0;

	/* Fold the 128 bits of binuptime into 32 bits.  */
	binuptime(&bt);
	v = bt.frac;
	v ^= bt.frac >> 32;
	v ^= bt.sec;
	v ^= bt.sec >> 32;
	return v;
}

static void
attach_seed_rndsource(void)
{

	/*
	 * First called no later than entropy_init, while we are still
	 * single-threaded, so no need for RUN_ONCE.
	 */
	if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
		return;
	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
	E->seed_rndsource = true;
}

/*
 * entropy_init()
 *
 *	Initialize the entropy subsystem.  Panic on failure.
 *
 *	Requires percpu(9) and sysctl(9) to be initialized.
 */
static void
entropy_init(void)
{
	uint32_t extra[2];
	struct krndsource *rs;
	unsigned i = 0;

	KASSERT(E->stage == ENTROPY_COLD);

	/* Grab some cycle counts early at boot.  */
	extra[i++] = entropy_timer();

	/* Run the entropy pool cryptography self-test.  */
	if (entpool_selftest() == -1)
		panic("entropy pool crypto self-test failed");

	/* Create the sysctl directory.  */
	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
	    SYSCTL_DESCR("Entropy (random number sources) options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	/* Create the sysctl knobs.  */
	/* XXX These shouldn't be writable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
	    SYSCTL_DESCR("Trigger entropy consolidation now"),
	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	/* XXX These should maybe not be readable at securelevel>0.  */
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
	    NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
	    NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
	    "epoch", SYSCTL_DESCR("Entropy epoch"),
	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);

	/* Initialize the global state for multithreaded operation.  */
	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&E->cv, "entropy");
	selinit(&E->selq);
	cv_init(&E->sourcelock_cv, "entsrclock");

	/* Make sure the seed source is attached.  */
	attach_seed_rndsource();

	/* Note if the bootloader didn't provide a seed.  */
	if (!E->seeded)
		aprint_debug("entropy: no seed from bootloader\n");

	/* Allocate the per-CPU records for all early entropy sources.  */
	LIST_FOREACH(rs, &E->sources, list)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));

	/* Enter the boot cycle count to get started.  */
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, 0);
	explicit_memset(extra, 0, sizeof extra);

	/* We are now ready for multi-threaded operation.  */
	E->stage = ENTROPY_WARM;
}

/*
 * entropy_init_late()
 *
 *	Late initialization.  Panic on failure.
 *
 *	Requires CPUs to have been detected and LWPs to have started.
 */
static void
entropy_init_late(void)
{
	int error;

	KASSERT(E->stage == ENTROPY_WARM);

	/* Allocate and initialize the per-CPU state.  */
	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
	    entropy_init_cpu, entropy_fini_cpu, NULL);

	/*
	 * Establish the softint at the highest softint priority level.
	 * Must happen after CPU detection.
	 */
	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    &entropy_softintr, NULL);
	if (entropy_sih == NULL)
		panic("unable to establish entropy softint");

	/*
	 * Create the entropy housekeeping thread.  Must happen after
	 * lwpinit.
	 */
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
	    entropy_thread, NULL, &entropy_lwp, "entbutler");
	if (error)
		panic("unable to create entropy housekeeping thread: %d",
		    error);

	/*
	 * Wait until the per-CPU initialization has hit all CPUs
	 * before proceeding to mark the entropy system hot.
	 */
	xc_barrier(XC_HIGHPRI);
	E->stage = ENTROPY_HOT;
}

/*
 * entropy_init_cpu(ptr, cookie, ci)
 *
 *	percpu(9) constructor for per-CPU entropy pool.
 */
static void
entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;

	ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt),
	    KM_SLEEP);
	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
	ec->ec_pending = 0;
	ec->ec_locked = false;

	evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
	    ci->ci_cpuname, "entropy softint");
}

/*
 * entropy_fini_cpu(ptr, cookie, ci)
 *
 *	percpu(9) destructor for per-CPU entropy pool.
 */
static void
entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;

	/*
	 * Zero any lingering data.  Disclosure of the per-CPU pool
	 * shouldn't retroactively affect the security of any keys
	 * generated, because entpool(9) erases whatever we have just
	 * drawn out of any pool, but better safe than sorry.
	 */
	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));

	evcnt_detach(ec->ec_softint_evcnt);

	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
	kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt));
}

/*
 * entropy_seed(seed)
 *
 *	Seed the entropy pool with seed.  Meant to be called as early
 *	as possible by the bootloader; may be called before or after
 *	entropy_init.  Must be called before system reaches userland.
 *	Must be called in thread or soft interrupt context, not in hard
 *	interrupt context.  Must be called at most once.
 *
 *	Overwrites the seed in place.  Caller may then free the memory.
 */
static void
entropy_seed(rndsave_t *seed)
{
	SHA1_CTX ctx;
	uint8_t digest[SHA1_DIGEST_LENGTH];
	bool seeded;

	/*
	 * Verify the checksum.  If the checksum fails, take the data
	 * but ignore the entropy estimate -- the file may have been
	 * incompletely written with garbage, which is harmless to add
	 * but may not be as unpredictable as alleged.
	 */
	SHA1Init(&ctx);
	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
	SHA1Update(&ctx, seed->data, sizeof(seed->data));
	SHA1Final(digest, &ctx);
	CTASSERT(sizeof(seed->digest) == sizeof(digest));
	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
		printf("entropy: invalid seed checksum\n");
		seed->entropy = 0;
	}
	explicit_memset(&ctx, 0, sizeof ctx);
	explicit_memset(digest, 0, sizeof digest);

	/*
	 * If the entropy is insensibly large, try byte-swapping.
	 * Otherwise assume the file is corrupted and act as though it
	 * has zero entropy.
527 */ 528 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) { 529 seed->entropy = bswap32(seed->entropy); 530 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) 531 seed->entropy = 0; 532 } 533 534 /* Make sure the seed source is attached. */ 535 attach_seed_rndsource(); 536 537 /* Test and set E->seeded. */ 538 if (E->stage >= ENTROPY_WARM) 539 mutex_enter(&E->lock); 540 seeded = E->seeded; 541 E->seeded = (seed->entropy > 0); 542 if (E->stage >= ENTROPY_WARM) 543 mutex_exit(&E->lock); 544 545 /* 546 * If we've been seeded, may be re-entering the same seed 547 * (e.g., bootloader vs module init, or something). No harm in 548 * entering it twice, but it contributes no additional entropy. 549 */ 550 if (seeded) { 551 printf("entropy: double-seeded by bootloader\n"); 552 seed->entropy = 0; 553 } else { 554 printf("entropy: entering seed from bootloader" 555 " with %u bits of entropy\n", (unsigned)seed->entropy); 556 } 557 558 /* Enter it into the pool and promptly zero it. */ 559 rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data), 560 seed->entropy); 561 explicit_memset(seed, 0, sizeof(*seed)); 562 } 563 564 /* 565 * entropy_bootrequest() 566 * 567 * Request entropy from all sources at boot, once config is 568 * complete and interrupts are running. 569 */ 570 void 571 entropy_bootrequest(void) 572 { 573 574 KASSERT(E->stage >= ENTROPY_WARM); 575 576 /* 577 * Request enough to satisfy the maximum entropy shortage. 578 * This is harmless overkill if the bootloader provided a seed. 579 */ 580 mutex_enter(&E->lock); 581 entropy_request(ENTROPY_CAPACITY); 582 mutex_exit(&E->lock); 583 } 584 585 /* 586 * entropy_epoch() 587 * 588 * Returns the current entropy epoch. If this changes, you should 589 * reseed. If -1, means system entropy has not yet reached full 590 * entropy or been explicitly consolidated; never reverts back to 591 * -1. Never zero, so you can always use zero as an uninitialized 592 * sentinel value meaning `reseed ASAP'. 593 * 594 * Usage model: 595 * 596 * struct foo { 597 * struct crypto_prng prng; 598 * unsigned epoch; 599 * } *foo; 600 * 601 * unsigned epoch = entropy_epoch(); 602 * if (__predict_false(epoch != foo->epoch)) { 603 * uint8_t seed[32]; 604 * if (entropy_extract(seed, sizeof seed, 0) != 0) 605 * warn("no entropy"); 606 * crypto_prng_reseed(&foo->prng, seed, sizeof seed); 607 * foo->epoch = epoch; 608 * } 609 */ 610 unsigned 611 entropy_epoch(void) 612 { 613 614 /* 615 * Unsigned int, so no need for seqlock for an atomic read, but 616 * make sure we read it afresh each time. 617 */ 618 return atomic_load_relaxed(&E->epoch); 619 } 620 621 /* 622 * entropy_ready() 623 * 624 * True if the entropy pool has full entropy. 625 */ 626 bool 627 entropy_ready(void) 628 { 629 630 return atomic_load_relaxed(&E->needed) == 0; 631 } 632 633 /* 634 * entropy_account_cpu(ec) 635 * 636 * Consider whether to consolidate entropy into the global pool 637 * after we just added some into the current CPU's pending pool. 638 * 639 * - If this CPU can provide enough entropy now, do so. 640 * 641 * - If this and whatever else is available on other CPUs can 642 * provide enough entropy, kick the consolidation thread. 643 * 644 * - Otherwise, do as little as possible, except maybe consolidate 645 * entropy at most once a minute. 646 * 647 * Caller must be bound to a CPU and therefore have exclusive 648 * access to ec. Will acquire and release the global lock. 
649 */ 650 static void 651 entropy_account_cpu(struct entropy_cpu *ec) 652 { 653 unsigned diff; 654 655 KASSERT(E->stage == ENTROPY_HOT); 656 657 /* 658 * If there's no entropy needed, and entropy has been 659 * consolidated in the last minute, do nothing. 660 */ 661 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 662 __predict_true(!atomic_load_relaxed(&entropy_depletion)) && 663 __predict_true((time_uptime - E->timestamp) <= 60)) 664 return; 665 666 /* If there's nothing pending, stop here. */ 667 if (ec->ec_pending == 0) 668 return; 669 670 /* Consider consolidation, under the lock. */ 671 mutex_enter(&E->lock); 672 if (E->needed != 0 && E->needed <= ec->ec_pending) { 673 /* 674 * If we have not yet attained full entropy but we can 675 * now, do so. This way we disseminate entropy 676 * promptly when it becomes available early at boot; 677 * otherwise we leave it to the entropy consolidation 678 * thread, which is rate-limited to mitigate side 679 * channels and abuse. 680 */ 681 uint8_t buf[ENTPOOL_CAPACITY]; 682 683 /* Transfer from the local pool to the global pool. */ 684 entpool_extract(ec->ec_pool, buf, sizeof buf); 685 entpool_enter(&E->pool, buf, sizeof buf); 686 atomic_store_relaxed(&ec->ec_pending, 0); 687 atomic_store_relaxed(&E->needed, 0); 688 689 /* Notify waiters that we now have full entropy. */ 690 entropy_notify(); 691 entropy_immediate_evcnt.ev_count++; 692 } else { 693 /* Record how much we can add to the global pool. */ 694 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending); 695 E->pending += diff; 696 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff); 697 698 /* 699 * This should have made a difference unless we were 700 * already saturated. 701 */ 702 KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY); 703 KASSERT(E->pending); 704 705 if (E->needed <= E->pending) { 706 /* 707 * Enough entropy between all the per-CPU 708 * pools. Wake up the housekeeping thread. 709 * 710 * If we don't need any entropy, this doesn't 711 * mean much, but it is the only time we ever 712 * gather additional entropy in case the 713 * accounting has been overly optimistic. This 714 * happens at most once a minute, so there's 715 * negligible performance cost. 716 */ 717 E->consolidate = true; 718 cv_broadcast(&E->cv); 719 if (E->needed == 0) 720 entropy_discretionary_evcnt.ev_count++; 721 } else { 722 /* Can't get full entropy. Keep gathering. */ 723 entropy_partial_evcnt.ev_count++; 724 } 725 } 726 mutex_exit(&E->lock); 727 } 728 729 /* 730 * entropy_enter_early(buf, len, nbits) 731 * 732 * Do entropy bookkeeping globally, before we have established 733 * per-CPU pools. Enter directly into the global pool in the hope 734 * that we enter enough before the first entropy_extract to thwart 735 * iterative-guessing attacks; entropy_extract will warn if not. 736 */ 737 static void 738 entropy_enter_early(const void *buf, size_t len, unsigned nbits) 739 { 740 bool notify = false; 741 742 if (E->stage >= ENTROPY_WARM) 743 mutex_enter(&E->lock); 744 745 /* Enter it into the pool. */ 746 entpool_enter(&E->pool, buf, len); 747 748 /* 749 * Decide whether to notify reseed -- we will do so if either: 750 * (a) we transition from partial entropy to full entropy, or 751 * (b) we get a batch of full entropy all at once. 752 */ 753 notify |= (E->needed && E->needed <= nbits); 754 notify |= (nbits >= ENTROPY_CAPACITY*NBBY); 755 756 /* Subtract from the needed count and notify if appropriate. 
	E->needed -= MIN(E->needed, nbits);
	if (notify) {
		entropy_notify();
		entropy_immediate_evcnt.ev_count++;
	}

	if (E->stage >= ENTROPY_WARM)
		mutex_exit(&E->lock);
}

/*
 * entropy_enter(buf, len, nbits)
 *
 *	Enter len bytes of data from buf into the system's entropy
 *	pool, stirring as necessary when the internal buffer fills up.
 *	nbits is a lower bound on the number of bits of entropy in the
 *	process that led to this sample.
 */
static void
entropy_enter(const void *buf, size_t len, unsigned nbits)
{
	struct entropy_cpu *ec;
	uint32_t pending;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "use entropy_enter_intr from interrupt context");
	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/* If it's too early after boot, just use entropy_enter_early.  */
	if (__predict_false(E->stage < ENTROPY_HOT)) {
		entropy_enter_early(buf, len, nbits);
		return;
	}

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Enter into the per-CPU pool.  */
	entpool_enter(ec->ec_pool, buf, len);

	/* Count up what we can add.  */
	pending = ec->ec_pending;
	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
	atomic_store_relaxed(&ec->ec_pending, pending);

	/* Consolidate globally if appropriate based on what we added.  */
	entropy_account_cpu(ec);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}

/*
 * entropy_enter_intr(buf, len, nbits)
 *
 *	Enter up to len bytes of data from buf into the system's
 *	entropy pool without stirring.  nbits is a lower bound on the
 *	number of bits of entropy in the process that led to this
 *	sample.  If the sample could be entered completely, assume
 *	nbits of entropy pending; otherwise assume none, since we don't
 *	know whether some parts of the sample are constant, for
 *	instance.  Schedule a softint to stir the entropy pool if
 *	needed.  Return true if used fully, false if truncated at all.
 *
 *	Using this in thread context will work, but you might as well
 *	use entropy_enter in that case.
 */
static bool
entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
{
	struct entropy_cpu *ec;
	bool fullyused = false;
	uint32_t pending;

	KASSERTMSG(howmany(nbits, NBBY) <= len,
	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);

	/* If it's too early after boot, just use entropy_enter_early.  */
	if (__predict_false(E->stage < ENTROPY_HOT)) {
		entropy_enter_early(buf, len, nbits);
		return true;
	}

	/*
	 * Acquire the per-CPU state.  If someone is in the middle of
	 * using it, drop the sample.  Otherwise, take the lock so that
	 * higher-priority interrupts will drop their samples.
	 */
	ec = percpu_getref(entropy_percpu);
	if (ec->ec_locked)
		goto out0;
	ec->ec_locked = true;
	__insn_barrier();

	/*
	 * Enter as much as we can into the per-CPU pool.  If it was
	 * truncated, schedule a softint to stir the pool and stop.
	 */
	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
		softint_schedule(entropy_sih);
		goto out1;
	}
	fullyused = true;

	/* Count up what we can contribute.  */
	pending = ec->ec_pending;
	pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
	atomic_store_relaxed(&ec->ec_pending, pending);

	/* Schedule a softint if we added anything and it matters.  */
	if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
		atomic_load_relaxed(&entropy_depletion)) &&
	    nbits != 0)
		softint_schedule(entropy_sih);

out1:	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
out0:	percpu_putref(entropy_percpu);

	return fullyused;
}

/*
 * entropy_softintr(cookie)
 *
 *	Soft interrupt handler for entering entropy.  Takes care of
 *	stirring the local CPU's entropy pool if it filled up during
 *	hard interrupts, and promptly crediting entropy from the local
 *	CPU's entropy pool to the global entropy pool if needed.
 */
static void
entropy_softintr(void *cookie)
{
	struct entropy_cpu *ec;

	/*
	 * Acquire the per-CPU state.  Other users can lock this only
	 * while soft interrupts are blocked.  Cause hard interrupts to
	 * drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Count statistics.  */
	ec->ec_softint_evcnt->ev_count++;

	/* Stir the pool if necessary.  */
	entpool_stir(ec->ec_pool);

	/* Consolidate globally if appropriate based on what we added.  */
	entropy_account_cpu(ec);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	percpu_putref(entropy_percpu);
}

/*
 * entropy_thread(cookie)
 *
 *	Handle any asynchronous entropy housekeeping.
 */
static void
entropy_thread(void *cookie)
{
	bool consolidate;

	for (;;) {
		/*
		 * Wait until there's full entropy somewhere among the
		 * CPUs, as confirmed at most once per minute, or
		 * someone wants to consolidate.
		 */
		if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
			consolidate = true;
		} else {
			mutex_enter(&E->lock);
			if (!E->consolidate)
				cv_timedwait(&E->cv, &E->lock, 60*hz);
			consolidate = E->consolidate;
			E->consolidate = false;
			mutex_exit(&E->lock);
		}

		if (consolidate) {
			/* Do it.  */
			entropy_do_consolidate();

			/* Mitigate abuse.  */
			kpause("entropy", false, hz, NULL);
		}
	}
}

/*
 * entropy_pending()
 *
 *	Count up the amount of entropy pending on other CPUs.
 */
static uint32_t
entropy_pending(void)
{
	uint32_t pending = 0;

	percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
	return pending;
}

static void
entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct entropy_cpu *ec = ptr;
	uint32_t *pendingp = cookie;
	uint32_t cpu_pending;

	cpu_pending = atomic_load_relaxed(&ec->ec_pending);
	*pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
}

/*
 * entropy_do_consolidate()
 *
 *	Issue a cross-call to gather entropy on all CPUs and advance
 *	the entropy epoch.
 */
static void
entropy_do_consolidate(void)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	struct entpool pool;
	uint8_t buf[ENTPOOL_CAPACITY];
	unsigned diff;
	uint64_t ticket;

	/* Gather entropy on all CPUs into a temporary pool.  */
	memset(&pool, 0, sizeof pool);
	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
	xc_wait(ticket);

	/* Acquire the lock to notify waiters.  */
	mutex_enter(&E->lock);

	/* Count another consolidation.  */
	entropy_consolidate_evcnt.ev_count++;

	/* Note when we last consolidated, i.e. now.  */
	E->timestamp = time_uptime;

	/* Mix what we gathered into the global pool.  */
	entpool_extract(&pool, buf, sizeof buf);
	entpool_enter(&E->pool, buf, sizeof buf);
	explicit_memset(&pool, 0, sizeof pool);

	/* Count the entropy that was gathered.  */
	diff = MIN(E->needed, E->pending);
	atomic_store_relaxed(&E->needed, E->needed - diff);
	E->pending -= diff;
	if (__predict_false(E->needed > 0)) {
		if (ratecheck(&lasttime, &interval) &&
		    (boothowto & AB_DEBUG) != 0) {
			printf("entropy: WARNING:"
			    " consolidating less than full entropy\n");
		}
	}

	/* Advance the epoch and notify waiters.  */
	entropy_notify();

	/* Release the lock.  */
	mutex_exit(&E->lock);
}

/*
 * entropy_consolidate_xc(vpool, arg2)
 *
 *	Extract output from the local CPU's input pool and enter it
 *	into a temporary pool passed as vpool.
 */
static void
entropy_consolidate_xc(void *vpool, void *arg2 __unused)
{
	struct entpool *pool = vpool;
	struct entropy_cpu *ec;
	uint8_t buf[ENTPOOL_CAPACITY];
	uint32_t extra[7];
	unsigned i = 0;
	int s;

	/* Grab CPU number and cycle counter to mix extra into the pool.  */
	extra[i++] = cpu_number();
	extra[i++] = entropy_timer();

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * discarding entropy in hard interrupts, so that we can
	 * extract from the per-CPU pool.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();
	extra[i++] = entropy_timer();

	/* Extract the data and count it no longer pending.  */
	entpool_extract(ec->ec_pool, buf, sizeof buf);
	atomic_store_relaxed(&ec->ec_pending, 0);
	extra[i++] = entropy_timer();

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
	extra[i++] = entropy_timer();

	/*
	 * Copy over statistics, and enter the per-CPU extract and the
	 * extra timing into the temporary pool, under the global lock.
	 */
	mutex_enter(&E->lock);
	extra[i++] = entropy_timer();
	entpool_enter(pool, buf, sizeof buf);
	explicit_memset(buf, 0, sizeof buf);
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entpool_enter(pool, extra, sizeof extra);
	explicit_memset(extra, 0, sizeof extra);
	mutex_exit(&E->lock);
}

/*
 * entropy_notify()
 *
 *	Caller just contributed entropy to the global pool.  Advance
 *	the entropy epoch and notify waiters.
 *
 *	Caller must hold the global entropy lock.  Except for the
 *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
 *	have just transitioned from partial entropy to full entropy --
 *	E->needed should be zero now.
 */
static void
entropy_notify(void)
{
	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
	static struct timeval lasttime; /* serialized by E->lock */
	unsigned epoch;

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	/*
	 * If this is the first time, print a message to the console
	 * that we're ready so operators can compare it to the timing
	 * of other events.
	 */
	if (__predict_false(!rnd_initial_entropy) && E->needed == 0) {
		printf("entropy: ready\n");
		rnd_initial_entropy = 1;
	}

	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
	    ratecheck(&lasttime, &interval)) {
		epoch = E->epoch + 1;
		if (epoch == 0 || epoch == (unsigned)-1)
			epoch = 1;
		atomic_store_relaxed(&E->epoch, epoch);
	}

	/* Notify waiters.  */
	if (E->stage >= ENTROPY_WARM) {
		cv_broadcast(&E->cv);
		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
	}

	/* Count another notification.  */
	entropy_notify_evcnt.ev_count++;
}

/*
 * entropy_consolidate()
 *
 *	Trigger entropy consolidation and wait for it to complete.
 *
 *	This should be used sparingly, not periodically -- requiring
 *	conscious intervention by the operator or a clear policy
 *	decision.  Otherwise, the kernel will automatically consolidate
 *	when enough entropy has been gathered into per-CPU pools to
 *	transition to full entropy.
 */
void
entropy_consolidate(void)
{
	uint64_t ticket;
	int error;

	KASSERT(E->stage == ENTROPY_HOT);

	mutex_enter(&E->lock);
	ticket = entropy_consolidate_evcnt.ev_count;
	E->consolidate = true;
	cv_broadcast(&E->cv);
	while (ticket == entropy_consolidate_evcnt.ev_count) {
		error = cv_wait_sig(&E->cv, &E->lock);
		if (error)
			break;
	}
	mutex_exit(&E->lock);
}

/*
 * sysctl -w kern.entropy.consolidate=1
 *
 *	Trigger entropy consolidation and wait for it to complete.
 *	Writable only by superuser.  This, writing to /dev/random, and
 *	ioctl(RNDADDDATA) are the only ways for the system to
 *	consolidate entropy if the operator knows something the kernel
 *	doesn't about how unpredictable the pending entropy pools are.
 */
static int
sysctl_entropy_consolidate(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int arg;
	int error;

	KASSERT(E->stage == ENTROPY_HOT);

	node.sysctl_data = &arg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (arg)
		entropy_consolidate();

	return error;
}

/*
 * sysctl -w kern.entropy.gather=1
 *
 *	Trigger gathering entropy from all on-demand sources, and wait
 *	for synchronous sources (but not asynchronous sources) to
 *	complete.  Writable only by superuser.
1223 */ 1224 static int 1225 sysctl_entropy_gather(SYSCTLFN_ARGS) 1226 { 1227 struct sysctlnode node = *rnode; 1228 int arg; 1229 int error; 1230 1231 KASSERT(E->stage == ENTROPY_HOT); 1232 1233 node.sysctl_data = &arg; 1234 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1235 if (error || newp == NULL) 1236 return error; 1237 if (arg) { 1238 mutex_enter(&E->lock); 1239 entropy_request(ENTROPY_CAPACITY); 1240 mutex_exit(&E->lock); 1241 } 1242 1243 return 0; 1244 } 1245 1246 /* 1247 * entropy_extract(buf, len, flags) 1248 * 1249 * Extract len bytes from the global entropy pool into buf. 1250 * 1251 * Flags may have: 1252 * 1253 * ENTROPY_WAIT Wait for entropy if not available yet. 1254 * ENTROPY_SIG Allow interruption by a signal during wait. 1255 * ENTROPY_HARDFAIL Either fill the buffer with full entropy, 1256 * or fail without filling it at all. 1257 * 1258 * Return zero on success, or error on failure: 1259 * 1260 * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 1261 * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 1262 * 1263 * If ENTROPY_WAIT is set, allowed only in thread context. If 1264 * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's 1265 * awfully high... Do we really need it in hard interrupts? This 1266 * arises from use of cprng_strong(9).) 1267 */ 1268 int 1269 entropy_extract(void *buf, size_t len, int flags) 1270 { 1271 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1272 static struct timeval lasttime; /* serialized by E->lock */ 1273 int error; 1274 1275 if (ISSET(flags, ENTROPY_WAIT)) { 1276 ASSERT_SLEEPABLE(); 1277 KASSERTMSG(E->stage >= ENTROPY_WARM, 1278 "can't wait for entropy until warm"); 1279 } 1280 1281 /* Acquire the global lock to get at the global pool. */ 1282 if (E->stage >= ENTROPY_WARM) 1283 mutex_enter(&E->lock); 1284 1285 /* Count up request for entropy in interrupt context. */ 1286 if (cpu_intr_p()) 1287 entropy_extract_intr_evcnt.ev_count++; 1288 1289 /* Wait until there is enough entropy in the system. */ 1290 error = 0; 1291 while (E->needed) { 1292 /* Ask for more, synchronously if possible. */ 1293 entropy_request(len); 1294 1295 /* If we got enough, we're done. */ 1296 if (E->needed == 0) { 1297 KASSERT(error == 0); 1298 break; 1299 } 1300 1301 /* If not waiting, stop here. */ 1302 if (!ISSET(flags, ENTROPY_WAIT)) { 1303 error = EWOULDBLOCK; 1304 break; 1305 } 1306 1307 /* Wait for some entropy to come in and try again. */ 1308 KASSERT(E->stage >= ENTROPY_WARM); 1309 printf("entropy: pid %d (%s) blocking due to lack of entropy\n", 1310 curproc->p_pid, curproc->p_comm); 1311 1312 if (ISSET(flags, ENTROPY_SIG)) { 1313 error = cv_wait_sig(&E->cv, &E->lock); 1314 if (error) 1315 break; 1316 } else { 1317 cv_wait(&E->cv, &E->lock); 1318 } 1319 } 1320 1321 /* 1322 * Count failure -- but fill the buffer nevertheless, unless 1323 * the caller specified ENTROPY_HARDFAIL. 1324 */ 1325 if (error) { 1326 if (ISSET(flags, ENTROPY_HARDFAIL)) 1327 goto out; 1328 entropy_extract_fail_evcnt.ev_count++; 1329 } 1330 1331 /* 1332 * Report a warning if we have never yet reached full entropy. 1333 * This is the only case where we consider entropy to be 1334 * `depleted' without kern.entropy.depletion enabled -- when we 1335 * only have partial entropy, an adversary may be able to 1336 * narrow the state of the pool down to a small number of 1337 * possibilities; the output then enables them to confirm a 1338 * guess, reducing its entropy from the adversary's perspective 1339 * to zero. 
1340 */ 1341 if (__predict_false(E->epoch == (unsigned)-1)) { 1342 if (ratecheck(&lasttime, &interval)) 1343 printf("entropy: WARNING:" 1344 " extracting entropy too early\n"); 1345 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY); 1346 } 1347 1348 /* Extract data from the pool, and `deplete' if we're doing that. */ 1349 entpool_extract(&E->pool, buf, len); 1350 if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 1351 error == 0) { 1352 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 1353 1354 atomic_store_relaxed(&E->needed, 1355 E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost)); 1356 entropy_deplete_evcnt.ev_count++; 1357 } 1358 1359 out: /* Release the global lock and return the error. */ 1360 if (E->stage >= ENTROPY_WARM) 1361 mutex_exit(&E->lock); 1362 return error; 1363 } 1364 1365 /* 1366 * entropy_poll(events) 1367 * 1368 * Return the subset of events ready, and if it is not all of 1369 * events, record curlwp as waiting for entropy. 1370 */ 1371 int 1372 entropy_poll(int events) 1373 { 1374 int revents = 0; 1375 1376 KASSERT(E->stage >= ENTROPY_WARM); 1377 1378 /* Always ready for writing. */ 1379 revents |= events & (POLLOUT|POLLWRNORM); 1380 1381 /* Narrow it down to reads. */ 1382 events &= POLLIN|POLLRDNORM; 1383 if (events == 0) 1384 return revents; 1385 1386 /* 1387 * If we have reached full entropy and we're not depleting 1388 * entropy, we are forever ready. 1389 */ 1390 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 1391 __predict_true(!atomic_load_relaxed(&entropy_depletion))) 1392 return revents | events; 1393 1394 /* 1395 * Otherwise, check whether we need entropy under the lock. If 1396 * we don't, we're ready; if we do, add ourselves to the queue. 1397 */ 1398 mutex_enter(&E->lock); 1399 if (E->needed == 0) 1400 revents |= events; 1401 else 1402 selrecord(curlwp, &E->selq); 1403 mutex_exit(&E->lock); 1404 1405 return revents; 1406 } 1407 1408 /* 1409 * filt_entropy_read_detach(kn) 1410 * 1411 * struct filterops::f_detach callback for entropy read events: 1412 * remove kn from the list of waiters. 1413 */ 1414 static void 1415 filt_entropy_read_detach(struct knote *kn) 1416 { 1417 1418 KASSERT(E->stage >= ENTROPY_WARM); 1419 1420 mutex_enter(&E->lock); 1421 selremove_knote(&E->selq, kn); 1422 mutex_exit(&E->lock); 1423 } 1424 1425 /* 1426 * filt_entropy_read_event(kn, hint) 1427 * 1428 * struct filterops::f_event callback for entropy read events: 1429 * poll for entropy. Caller must hold the global entropy lock if 1430 * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 1431 */ 1432 static int 1433 filt_entropy_read_event(struct knote *kn, long hint) 1434 { 1435 int ret; 1436 1437 KASSERT(E->stage >= ENTROPY_WARM); 1438 1439 /* Acquire the lock, if caller is outside entropy subsystem. */ 1440 if (hint == NOTE_SUBMIT) 1441 KASSERT(mutex_owned(&E->lock)); 1442 else 1443 mutex_enter(&E->lock); 1444 1445 /* 1446 * If we still need entropy, can't read anything; if not, can 1447 * read arbitrarily much. 1448 */ 1449 if (E->needed != 0) { 1450 ret = 0; 1451 } else { 1452 if (atomic_load_relaxed(&entropy_depletion)) 1453 kn->kn_data = ENTROPY_CAPACITY*NBBY; 1454 else 1455 kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 1456 ret = 1; 1457 } 1458 1459 /* Release the lock, if caller is outside entropy subsystem. */ 1460 if (hint == NOTE_SUBMIT) 1461 KASSERT(mutex_owned(&E->lock)); 1462 else 1463 mutex_exit(&E->lock); 1464 1465 return ret; 1466 } 1467 1468 /* XXX Makes sense only for /dev/u?random. 
/* XXX Makes sense only for /dev/u?random.  */
static const struct filterops entropy_read_filtops = {
	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach = NULL,
	.f_detach = filt_entropy_read_detach,
	.f_event = filt_entropy_read_event,
};

/*
 * entropy_kqfilter(kn)
 *
 *	Register kn to receive entropy event notifications.  May be
 *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
 */
int
entropy_kqfilter(struct knote *kn)
{

	KASSERT(E->stage >= ENTROPY_WARM);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* Enter into the global select queue.  */
		mutex_enter(&E->lock);
		kn->kn_fop = &entropy_read_filtops;
		selrecord_knote(&E->selq, kn);
		mutex_exit(&E->lock);
		return 0;
	case EVFILT_WRITE:
		/* Can always dump entropy into the system.  */
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return EINVAL;
	}
}

/*
 * rndsource_setcb(rs, get, getarg)
 *
 *	Set the request callback for the entropy source rs, if it can
 *	provide entropy on demand.  Must precede rnd_attach_source.
 */
void
rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
    void *getarg)
{

	rs->get = get;
	rs->getarg = getarg;
}

/*
 * rnd_attach_source(rs, name, type, flags)
 *
 *	Attach the entropy source rs.  Must be done after
 *	rndsource_setcb, if any, and before any calls to rnd_add_data.
 */
void
rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
    uint32_t flags)
{
	uint32_t extra[4];
	unsigned i = 0;

	/* Grab cycle counter to mix extra into the pool.  */
	extra[i++] = entropy_timer();

	/*
	 * Apply some standard flags:
	 *
	 *	- We do not bother with network devices by default, for
	 *	  hysterical raisins (perhaps: because it is often the case
	 *	  that an adversary can influence network packet timings).
	 */
	switch (type) {
	case RND_TYPE_NET:
		flags |= RND_FLAG_NO_COLLECT;
		break;
	}

	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);

	/* Initialize the random source.  */
	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
	strlcpy(rs->name, name, sizeof(rs->name));
	memset(&rs->time_delta, 0, sizeof(rs->time_delta));
	memset(&rs->value_delta, 0, sizeof(rs->value_delta));
	rs->total = 0;
	rs->type = type;
	rs->flags = flags;
	if (E->stage >= ENTROPY_WARM)
		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
	extra[i++] = entropy_timer();

	/* Wire it into the global list of random sources.  */
	if (E->stage >= ENTROPY_WARM)
		mutex_enter(&E->lock);
	LIST_INSERT_HEAD(&E->sources, rs, list);
	if (E->stage >= ENTROPY_WARM)
		mutex_exit(&E->lock);
	extra[i++] = entropy_timer();

	/* Request that it provide entropy ASAP, if we can.  */
	if (ISSET(flags, RND_FLAG_HASCB))
		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
	extra[i++] = entropy_timer();

	/* Mix the extra into the pool.  */
	KASSERT(i == __arraycount(extra));
	entropy_enter(extra, sizeof extra, 0);
	explicit_memset(extra, 0, sizeof extra);
}
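
/*
 * Example (editor's illustrative sketch): a driver for an on-demand
 * hardware RNG might attach its source roughly like this, setting the
 * callback before attaching and passing RND_FLAG_HASCB.  The names
 * mydev_softc, mydev_get, mydev_read_rng, sc_rndsource, and the claim
 * of full entropy (n*NBBY) are hypothetical; the real lower bound is
 * the driver author's responsibility.
 *
 *	static void
 *	mydev_get(size_t nbytes, void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *		uint8_t buf[32];
 *
 *		while (nbytes) {
 *			size_t n = MIN(nbytes, sizeof buf);
 *
 *			mydev_read_rng(sc, buf, n);
 *			rnd_add_data(&sc->sc_rndsource, buf, n, n*NBBY);
 *			nbytes -= n;
 *		}
 *		explicit_memset(buf, 0, sizeof buf);
 *	}
 *
 *	rndsource_setcb(&sc->sc_rndsource, mydev_get, sc);
 *	rnd_attach_source(&sc->sc_rndsource, device_xname(sc->sc_dev),
 *	    RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
 */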

/*
 * rnd_detach_source(rs)
 *
 *	Detach the entropy source rs.  May sleep waiting for users to
 *	drain.  Further use is not allowed.
 */
void
rnd_detach_source(struct krndsource *rs)
{

	/*
	 * If we're cold (shouldn't happen, but hey), just remove it
	 * from the list -- there's nothing allocated.
	 */
	if (E->stage == ENTROPY_COLD) {
		LIST_REMOVE(rs, list);
		return;
	}

	/* We may have to wait for entropy_request.  */
	ASSERT_SLEEPABLE();

	/* Wait until the source list is not in use, and remove it.  */
	mutex_enter(&E->lock);
	while (E->sourcelock)
		cv_wait(&E->sourcelock_cv, &E->lock);
	LIST_REMOVE(rs, list);
	mutex_exit(&E->lock);

	/* Free the per-CPU data.  */
	percpu_free(rs->state, sizeof(struct rndsource_cpu));
}

/*
 * rnd_lock_sources()
 *
 *	Prevent changes to the list of rndsources while we iterate it.
 *	Interruptible.  Caller must hold the global entropy lock.  If
 *	successful, no rndsource will go away until rnd_unlock_sources
 *	even while the caller releases the global entropy lock.
 */
static int
rnd_lock_sources(void)
{
	int error;

	KASSERT(mutex_owned(&E->lock));

	while (E->sourcelock) {
		error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
		if (error)
			return error;
	}

	E->sourcelock = curlwp;
	return 0;
}

/*
 * rnd_trylock_sources()
 *
 *	Try to lock the list of sources, but if it's already locked,
 *	fail.  Caller must hold the global entropy lock.  If
 *	successful, no rndsource will go away until rnd_unlock_sources
 *	even while the caller releases the global entropy lock.
 */
static bool
rnd_trylock_sources(void)
{

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	if (E->sourcelock)
		return false;
	E->sourcelock = curlwp;
	return true;
}

/*
 * rnd_unlock_sources()
 *
 *	Unlock the list of sources after rnd_lock_sources or
 *	rnd_trylock_sources.  Caller must hold the global entropy lock.
 */
static void
rnd_unlock_sources(void)
{

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
	    curlwp, E->sourcelock);
	E->sourcelock = NULL;
	if (E->stage >= ENTROPY_WARM)
		cv_signal(&E->sourcelock_cv);
}

/*
 * rnd_sources_locked()
 *
 *	True if we hold the list of rndsources locked, for diagnostic
 *	assertions.
 */
static bool __diagused
rnd_sources_locked(void)
{

	return E->sourcelock == curlwp;
}

/*
 * entropy_request(nbytes)
 *
 *	Request nbytes bytes of entropy from all sources in the system.
 *	OK if we overdo it.  Caller must hold the global entropy lock;
 *	will release and re-acquire it.
 */
static void
entropy_request(size_t nbytes)
{
	struct krndsource *rs;

	KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));

	/*
	 * If there is a request in progress, let it proceed.
	 * Otherwise, note that a request is in progress to avoid
	 * reentry and to block rnd_detach_source until we're done.
	 */
	if (!rnd_trylock_sources())
		return;
	entropy_request_evcnt.ev_count++;

	/* Clamp to the maximum reasonable request.  */
	nbytes = MIN(nbytes, ENTROPY_CAPACITY);

	/* Walk the list of sources.  */
	LIST_FOREACH(rs, &E->sources, list) {
		/* Skip sources without callbacks.  */
		if (!ISSET(rs->flags, RND_FLAG_HASCB))
			continue;

		/*
		 * Skip sources that are disabled altogether -- we
		 * would just ignore their samples anyway.
		 */
		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
			continue;

		/* Drop the lock while we call the callback.  */
		if (E->stage >= ENTROPY_WARM)
			mutex_exit(&E->lock);
		(*rs->get)(nbytes, rs->getarg);
		if (E->stage >= ENTROPY_WARM)
			mutex_enter(&E->lock);
	}

	/* Notify rnd_detach_source that the request is done.  */
	rnd_unlock_sources();
}

/*
 * rnd_add_uint32(rs, value)
 *
 *	Enter 32 bits of data from an entropy source into the pool.
 *
 *	If rs is NULL, may not be called from interrupt context.
 *
 *	If rs is non-NULL, may be called from any context.  May drop
 *	data if called from interrupt context.
 */
void
rnd_add_uint32(struct krndsource *rs, uint32_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

void
_rnd_add_uint32(struct krndsource *rs, uint32_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

void
_rnd_add_uint64(struct krndsource *rs, uint64_t value)
{

	rnd_add_data(rs, &value, sizeof value, 0);
}

/*
 * rnd_add_data(rs, buf, len, entropybits)
 *
 *	Enter data from an entropy source into the pool, with a
 *	driver's estimate of how much entropy the physical source of
 *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
 *	estimate and treat it as zero.
 *
 *	If rs is NULL, may not be called from interrupt context.
 *
 *	If rs is non-NULL, may be called from any context.  May drop
 *	data if called from interrupt context.
 */
void
rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{
	uint32_t extra;
	uint32_t flags;

	KASSERTMSG(howmany(entropybits, NBBY) <= len,
	    "%s: impossible entropy rate:"
	    " %"PRIu32" bits in %"PRIu32"-byte string",
	    rs ? rs->name : "(anonymous)", entropybits, len);

	/* If there's no rndsource, just enter the data and time now.  */
	if (rs == NULL) {
		entropy_enter(buf, len, entropybits);
		extra = entropy_timer();
		entropy_enter(&extra, sizeof extra, 0);
		explicit_memset(&extra, 0, sizeof extra);
		return;
	}

	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
	flags = atomic_load_relaxed(&rs->flags);

	/*
	 * Skip if:
	 * - we're not collecting entropy, or
	 * - the operator doesn't want to collect entropy from this, or
	 * - neither data nor timings are being collected from this.
	 */
	if (!atomic_load_relaxed(&entropy_collection) ||
	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
		return;

	/* If asked, ignore the estimate.  */
	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
		entropybits = 0;

	/* If we are collecting data, enter them.  */
	if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
		rnd_add_data_1(rs, buf, len, entropybits,
		    RND_FLAG_COLLECT_VALUE);

	/* If we are collecting timings, enter one.  */
	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
		extra = entropy_timer();
		rnd_add_data_1(rs, &extra, sizeof extra, 0,
		    RND_FLAG_COLLECT_TIME);
	}
}
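
/*
 * Example (editor's illustrative sketch): a driver that only wants to
 * contribute event timings and register values can call
 * rnd_add_uint32 from its interrupt handler; as documented above,
 * samples entered from interrupt context may be dropped under load,
 * which is acceptable for this kind of opportunistic source.
 * mydev_intr, mydev_softc, mydev_read_status, and sc_rndsource are
 * hypothetical names.
 *
 *	static int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		... service the device ...
 *		rnd_add_uint32(&sc->sc_rndsource, mydev_read_status(sc));
 *		return 1;
 *	}
 */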
static unsigned
add_sat(unsigned a, unsigned b)
{
	unsigned c = a + b;

	return (c < a ? UINT_MAX : c);
}

/*
 * rnd_add_data_1(rs, buf, len, entropybits, flag)
 *
 *	Internal subroutine to call either entropy_enter_intr, if we're
 *	in interrupt context, or entropy_enter if not, and to count the
 *	entropy in an rndsource.
 */
static void
rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits, uint32_t flag)
{
	bool fullyused;

	/*
	 * If we're in interrupt context, use entropy_enter_intr and
	 * take note of whether it consumed the full sample; if not,
	 * use entropy_enter, which always consumes the full sample.
	 */
	if (curlwp && cpu_intr_p()) {
		fullyused = entropy_enter_intr(buf, len, entropybits);
	} else {
		entropy_enter(buf, len, entropybits);
		fullyused = true;
	}

	/*
	 * If we used the full sample, note how many bits were
	 * contributed from this source.
	 */
	if (fullyused) {
		if (E->stage < ENTROPY_HOT) {
			if (E->stage >= ENTROPY_WARM)
				mutex_enter(&E->lock);
			rs->total = add_sat(rs->total, entropybits);
			switch (flag) {
			case RND_FLAG_COLLECT_TIME:
				rs->time_delta.insamples =
				    add_sat(rs->time_delta.insamples, 1);
				break;
			case RND_FLAG_COLLECT_VALUE:
				rs->value_delta.insamples =
				    add_sat(rs->value_delta.insamples, 1);
				break;
			}
			if (E->stage >= ENTROPY_WARM)
				mutex_exit(&E->lock);
		} else {
			struct rndsource_cpu *rc = percpu_getref(rs->state);

			atomic_store_relaxed(&rc->rc_entropybits,
			    add_sat(rc->rc_entropybits, entropybits));
			switch (flag) {
			case RND_FLAG_COLLECT_TIME:
				atomic_store_relaxed(&rc->rc_timesamples,
				    add_sat(rc->rc_timesamples, 1));
				break;
			case RND_FLAG_COLLECT_VALUE:
				atomic_store_relaxed(&rc->rc_datasamples,
				    add_sat(rc->rc_datasamples, 1));
				break;
			}
			percpu_putref(rs->state);
		}
	}
}

/*
 * rnd_add_data_sync(rs, buf, len, entropybits)
 *
 *	Same as rnd_add_data.  Originally used in rndsource callbacks,
 *	to break an unnecessary cycle; no longer really needed.
 */
void
rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{

	rnd_add_data(rs, buf, len, entropybits);
}
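
/*
 * Example: rnd_add_data_sync is what a driver's "get" callback -- the
 * function entropy_request invokes above as (*rs->get)(nbytes, arg) --
 * has traditionally called to satisfy a request.  A minimal sketch of
 * such a callback, with invented rngexample_* names; real callbacks
 * are registered with rndsource_setcb before rnd_attach_source:
 *
 *	static void
 *	rngexample_get(size_t nbytes, void *arg)
 *	{
 *		struct rngexample_softc *sc = arg;
 *		uint8_t buf[32];
 *
 *		while (nbytes) {
 *			size_t n = MIN(nbytes, sizeof buf);
 *
 *			rngexample_read(sc, buf, n);
 *			rnd_add_data_sync(&sc->sc_rndsource, buf, n,
 *			    n*NBBY);
 *			nbytes -= n;
 *		}
 *		explicit_memset(buf, 0, sizeof buf);
 *	}
 */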
/*
 * rndsource_entropybits(rs)
 *
 *	Return approximately the number of bits of entropy that have
 *	been contributed via rs so far.  Approximate if other CPUs may
 *	be calling rnd_add_data concurrently.
 */
static unsigned
rndsource_entropybits(struct krndsource *rs)
{
	unsigned nbits = rs->total;

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());
	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
	return nbits;
}

static void
rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	unsigned *nbitsp = cookie;
	unsigned cpu_nbits;

	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
}

/*
 * rndsource_to_user(rs, urs)
 *
 *	Copy a description of rs out to urs for userland.
 */
static void
rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urs, 0, sizeof(*urs));

	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
	strlcpy(urs->name, rs->name, sizeof(urs->name));
	urs->total = rndsource_entropybits(rs);
	urs->type = rs->type;
	urs->flags = atomic_load_relaxed(&rs->flags);
}

/*
 * rndsource_to_user_est(rs, urse)
 *
 *	Copy a description of rs and estimation statistics out to urse
 *	for userland.
 */
static void
rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urse, 0, sizeof(*urse));

	/* Copy out the rndsource description.  */
	rndsource_to_user(rs, &urse->rt);

	/* Gather the statistics.  */
	urse->dt_samples = rs->time_delta.insamples;
	urse->dt_total = 0;
	urse->dv_samples = rs->value_delta.insamples;
	urse->dv_total = urse->rt.total;
	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
}

static void
rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	rndsource_est_t *urse = cookie;

	urse->dt_samples = add_sat(urse->dt_samples,
	    atomic_load_relaxed(&rc->rc_timesamples));
	urse->dv_samples = add_sat(urse->dv_samples,
	    atomic_load_relaxed(&rc->rc_datasamples));
}

/*
 * entropy_reset_xc(arg1, arg2)
 *
 *	Reset the current CPU's pending entropy to zero.
 */
static void
entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
{
	uint32_t extra = entropy_timer();
	struct entropy_cpu *ec;
	int s;

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Zero the pending count and enter a cycle count for fun.  */
	ec->ec_pending = 0;
	entpool_enter(ec->ec_pool, &extra, sizeof extra);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}
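
/*
 * Example: entropy_reset_xc only resets the CPU it runs on, so a
 * caller that wants every per-CPU pending count cleared dispatches it
 * with xcall(9), as the RNDCTL handler below does.  A sketch of the
 * synchronous variant, assuming the caller may sleep and does not hold
 * E->lock across the wait:
 *
 *	xc_wait(xc_broadcast(0, &entropy_reset_xc, NULL, NULL));
 *
 * The RNDCTL code below issues the broadcast without waiting and
 * clears the global pending count itself.
 */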
/*
 * entropy_ioctl(cmd, data)
 *
 *	Handle various /dev/random ioctl queries.
 */
int
entropy_ioctl(unsigned long cmd, void *data)
{
	struct krndsource *rs;
	bool privileged = false;
	int error;

	KASSERT(E->stage >= ENTROPY_WARM);

	/* Verify user's authorization to perform the ioctl.  */
	switch (cmd) {
	case RNDGETENTCNT:
	case RNDGETPOOLSTAT:
	case RNDGETSRCNUM:
	case RNDGETSRCNAME:
	case RNDGETESTNUM:
	case RNDGETESTNAME:
		error = kauth_authorize_device(kauth_cred_get(),
		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDCTL:
		error = kauth_authorize_device(kauth_cred_get(),
		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDADDDATA:
		error = kauth_authorize_device(kauth_cred_get(),
		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
		/* Ascertain whether the user's inputs should be counted.  */
		if (kauth_authorize_device(kauth_cred_get(),
			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
			NULL, NULL, NULL, NULL) == 0)
			privileged = true;
		break;
	default: {
		/*
		 * XXX Hack to avoid changing module ABI so this can be
		 * pulled up.  Later, we can just remove the argument.
		 */
		static const struct fileops fops = {
			.fo_ioctl = rnd_system_ioctl,
		};
		struct file f = {
			.f_ops = &fops,
		};
		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
		    enosys(), error);
#if defined(_LP64)
		if (error == ENOSYS)
			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
			    enosys(), error);
#endif
		if (error == ENOSYS)
			error = ENOTTY;
		break;
	}
	}

	/* If anything went wrong with authorization, stop here.  */
	if (error)
		return error;

	/* Dispatch on the command.  */
	switch (cmd) {
	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
		uint32_t *countp = data;

		mutex_enter(&E->lock);
		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
		mutex_exit(&E->lock);

		break;
	}
	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
		rndpoolstat_t *pstat = data;

		mutex_enter(&E->lock);

		/* parameters */
		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */

		/* state */
		pstat->added = 0; /* XXX total entropy_enter count */
		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
		pstat->removed = 0; /* XXX total entropy_extract count */
		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
		pstat->generated = 0; /* XXX bits of data...fabricated? */

		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
		rndstat_t *stat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (stat->count == 0)
			break;
		if (stat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == stat->start)
				break;
		}
		while (i < stat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &stat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= stat->count);
		stat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
		rndstat_est_t *estat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (estat->count == 0)
			break;
		if (estat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == estat->start)
				break;
		}
		while (i < estat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &estat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= estat->count);
		estat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
		rndstat_name_t *nstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, nstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &nstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
		rndstat_est_name_t *enstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, enstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &enstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDCTL: {	/* Modify entropy source flags.  */
		rndctl_t *rndctl = data;
		const size_t n = sizeof(rs->name);
		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
		uint32_t flags;
		bool reset = false, request = false;

		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));

		/* Whitelist the flags that the user can change.  */
		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;

		/*
		 * For each matching rndsource, either by type if
		 * specified or by name if not, set the masked flags.
		 */
		mutex_enter(&E->lock);
		LIST_FOREACH(rs, &E->sources, list) {
			if (rndctl->type != 0xff) {
				if (rs->type != rndctl->type)
					continue;
			} else {
				if (strncmp(rs->name, rndctl->name, n) != 0)
					continue;
			}
			flags = rs->flags & ~rndctl->mask;
			flags |= rndctl->flags & rndctl->mask;
			if ((rs->flags & resetflags) == 0 &&
			    (flags & resetflags) != 0)
				reset = true;
			if ((rs->flags ^ flags) & resetflags)
				request = true;
			atomic_store_relaxed(&rs->flags, flags);
		}
		mutex_exit(&E->lock);

		/*
		 * If we disabled estimation or collection, nix all the
		 * pending entropy and set needed to the maximum.
		 */
		if (reset) {
			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
			mutex_enter(&E->lock);
			E->pending = 0;
			atomic_store_relaxed(&E->needed,
			    ENTROPY_CAPACITY*NBBY);
			mutex_exit(&E->lock);
		}

		/*
		 * If we changed any of the estimation or collection
		 * flags, request new samples from everyone -- either
		 * to make up for what we just lost, or to get new
		 * samples from what we just added.
		 */
		if (request) {
			mutex_enter(&E->lock);
			entropy_request(ENTROPY_CAPACITY);
			mutex_exit(&E->lock);
		}
		break;
	}
	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
		rnddata_t *rdata = data;
		unsigned entropybits = 0;

		if (!atomic_load_relaxed(&entropy_collection))
			break;	/* thanks but no thanks */
		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
			return EINVAL;

		/*
		 * This ioctl serves as the userland alternative to a
		 * bootloader-provided seed -- typically furnished by
		 * /etc/rc.d/random_seed.  We accept the user's entropy
		 * claim only if
		 *
		 * (a) the user is privileged, and
		 * (b) we have not entered a bootloader seed,
		 *
		 * under the assumption that the user may use this to
		 * load a seed from disk that we have already loaded
		 * from the bootloader, so we don't double-count it.
		 */
		if (privileged && rdata->entropy && rdata->len) {
			mutex_enter(&E->lock);
			if (!E->seeded) {
				entropybits = MIN(rdata->entropy,
				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
				E->seeded = true;
			}
			mutex_exit(&E->lock);
		}

		/* Enter the data and consolidate entropy.  */
		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
		    entropybits);
		entropy_consolidate();
		break;
	}
	default:
		error = ENOTTY;
	}

	/* Return any error that may have come up.  */
	return error;
}

/* Legacy entry points */

void
rnd_seed(void *seed, size_t len)
{

	if (len != sizeof(rndsave_t)) {
		printf("entropy: invalid seed length: %zu,"
		    " expected sizeof(rndsave_t) = %zu\n",
		    len, sizeof(rndsave_t));
		return;
	}
	entropy_seed(seed);
}

void
rnd_init(void)
{

	entropy_init();
}

void
rnd_init_softint(void)
{

	entropy_init_late();
}

int
rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
{

	return entropy_ioctl(cmd, data);
}
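
/*
 * Example: userland reaches entropy_ioctl above through the rnd(4)
 * pseudo-device, e.g. via /dev/urandom, which funnels the commands
 * through rnd_system_ioctl.  A sketch of a minimal userland query of
 * the current entropy count -- shown only to illustrate the ioctl
 * contract, with error reporting trimmed:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/rndio.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t bits;
 *		int fd = open("/dev/urandom", O_RDONLY);
 *
 *		if (fd == -1 || ioctl(fd, RNDGETENTCNT, &bits) == -1)
 *			return 1;
 *		printf("%u bits of entropy\n", bits);
 *		return 0;
 *	}
 */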