1 /* $NetBSD: kern_entropy.c,v 1.30 2021/02/12 19:48:26 jmcneill Exp $ */ 2 3 /*- 4 * Copyright (c) 2019 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Taylor R. Campbell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Entropy subsystem 34 * 35 * * Each CPU maintains a per-CPU entropy pool so that gathering 36 * entropy requires no interprocessor synchronization, except 37 * early at boot when we may be scrambling to gather entropy as 38 * soon as possible. 39 * 40 * - entropy_enter gathers entropy and never drops it on the 41 * floor, at the cost of sometimes having to do cryptography. 42 * 43 * - entropy_enter_intr gathers entropy or drops it on the 44 * floor, with low latency. Work to stir the pool or kick the 45 * housekeeping thread is scheduled in soft interrupts. 46 * 47 * * entropy_enter immediately enters into the global pool if it 48 * can transition to full entropy in one swell foop. Otherwise, 49 * it defers to a housekeeping thread that consolidates entropy, 50 * but only when the CPUs collectively have full entropy, in 51 * order to mitigate iterative-guessing attacks. 52 * 53 * * The entropy housekeeping thread continues to consolidate 54 * entropy even after we think we have full entropy, in case we 55 * are wrong, but is limited to one discretionary consolidation 56 * per minute, and only when new entropy is actually coming in, 57 * to limit performance impact. 58 * 59 * * The entropy epoch is the number that changes when we 60 * transition from partial entropy to full entropy, so that 61 * users can easily determine when to reseed. This also 62 * facilitates an operator explicitly causing everything to 63 * reseed by sysctl -w kern.entropy.consolidate=1. 64 * 65 * * No entropy estimation based on the sample values, which is a 66 * contradiction in terms and a potential source of side 67 * channels. It is the responsibility of the driver author to 68 * study how predictable the physical source of input can ever 69 * be, and to furnish a lower bound on the amount of entropy it 70 * has. 
71 * 72 * * Entropy depletion is available for testing (or if you're into 73 * that sort of thing), with sysctl -w kern.entropy.depletion=1; 74 * the logic to support it is small, to minimize chance of bugs. 75 */ 76 77 #include <sys/cdefs.h> 78 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.30 2021/02/12 19:48:26 jmcneill Exp $"); 79 80 #include <sys/param.h> 81 #include <sys/types.h> 82 #include <sys/atomic.h> 83 #include <sys/compat_stub.h> 84 #include <sys/condvar.h> 85 #include <sys/cpu.h> 86 #include <sys/entropy.h> 87 #include <sys/errno.h> 88 #include <sys/evcnt.h> 89 #include <sys/event.h> 90 #include <sys/file.h> 91 #include <sys/intr.h> 92 #include <sys/kauth.h> 93 #include <sys/kernel.h> 94 #include <sys/kmem.h> 95 #include <sys/kthread.h> 96 #include <sys/module_hook.h> 97 #include <sys/mutex.h> 98 #include <sys/percpu.h> 99 #include <sys/poll.h> 100 #include <sys/queue.h> 101 #include <sys/reboot.h> 102 #include <sys/rnd.h> /* legacy kernel API */ 103 #include <sys/rndio.h> /* userland ioctl interface */ 104 #include <sys/rndsource.h> /* kernel rndsource driver API */ 105 #include <sys/select.h> 106 #include <sys/selinfo.h> 107 #include <sys/sha1.h> /* for boot seed checksum */ 108 #include <sys/stdint.h> 109 #include <sys/sysctl.h> 110 #include <sys/syslog.h> 111 #include <sys/systm.h> 112 #include <sys/time.h> 113 #include <sys/xcall.h> 114 115 #include <lib/libkern/entpool.h> 116 117 #include <machine/limits.h> 118 119 #ifdef __HAVE_CPU_COUNTER 120 #include <machine/cpu_counter.h> 121 #endif 122 123 /* 124 * struct entropy_cpu 125 * 126 * Per-CPU entropy state. The pool is allocated separately 127 * because percpu(9) sometimes moves per-CPU objects around 128 * without zeroing them, which would lead to unwanted copies of 129 * sensitive secrets. The evcnt is allocated separately becuase 130 * evcnt(9) assumes it stays put in memory. 131 */ 132 struct entropy_cpu { 133 struct evcnt *ec_softint_evcnt; 134 struct entpool *ec_pool; 135 unsigned ec_pending; 136 bool ec_locked; 137 }; 138 139 /* 140 * struct rndsource_cpu 141 * 142 * Per-CPU rndsource state. 143 */ 144 struct rndsource_cpu { 145 unsigned rc_entropybits; 146 unsigned rc_timesamples; 147 unsigned rc_datasamples; 148 }; 149 150 /* 151 * entropy_global (a.k.a. E for short in this file) 152 * 153 * Global entropy state. Writes protected by the global lock. 154 * Some fields, marked (A), can be read outside the lock, and are 155 * maintained with atomic_load/store_relaxed. 
156 */ 157 struct { 158 kmutex_t lock; /* covers all global state */ 159 struct entpool pool; /* global pool for extraction */ 160 unsigned needed; /* (A) needed globally */ 161 unsigned pending; /* (A) pending in per-CPU pools */ 162 unsigned timestamp; /* (A) time of last consolidation */ 163 unsigned epoch; /* (A) changes when needed -> 0 */ 164 kcondvar_t cv; /* notifies state changes */ 165 struct selinfo selq; /* notifies needed -> 0 */ 166 struct lwp *sourcelock; /* lock on list of sources */ 167 kcondvar_t sourcelock_cv; /* notifies sourcelock release */ 168 LIST_HEAD(,krndsource) sources; /* list of entropy sources */ 169 enum entropy_stage { 170 ENTROPY_COLD = 0, /* single-threaded */ 171 ENTROPY_WARM, /* multi-threaded at boot before CPUs */ 172 ENTROPY_HOT, /* multi-threaded multi-CPU */ 173 } stage; 174 bool consolidate; /* kick thread to consolidate */ 175 bool seed_rndsource; /* true if seed source is attached */ 176 bool seeded; /* true if seed file already loaded */ 177 } entropy_global __cacheline_aligned = { 178 /* Fields that must be initialized when the kernel is loaded. */ 179 .needed = ENTROPY_CAPACITY*NBBY, 180 .epoch = (unsigned)-1, /* -1 means entropy never consolidated */ 181 .sources = LIST_HEAD_INITIALIZER(entropy_global.sources), 182 .stage = ENTROPY_COLD, 183 }; 184 185 #define E (&entropy_global) /* declutter */ 186 187 /* Read-mostly globals */ 188 static struct percpu *entropy_percpu __read_mostly; /* struct entropy_cpu */ 189 static void *entropy_sih __read_mostly; /* softint handler */ 190 static struct lwp *entropy_lwp __read_mostly; /* housekeeping thread */ 191 192 int rnd_initial_entropy __read_mostly; /* XXX legacy */ 193 194 static struct krndsource seed_rndsource __read_mostly; 195 196 /* 197 * Event counters 198 * 199 * Must be careful with adding these because they can serve as 200 * side channels. 201 */ 202 static struct evcnt entropy_discretionary_evcnt = 203 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary"); 204 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt); 205 static struct evcnt entropy_immediate_evcnt = 206 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate"); 207 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt); 208 static struct evcnt entropy_partial_evcnt = 209 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial"); 210 EVCNT_ATTACH_STATIC(entropy_partial_evcnt); 211 static struct evcnt entropy_consolidate_evcnt = 212 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate"); 213 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt); 214 static struct evcnt entropy_extract_intr_evcnt = 215 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr"); 216 EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt); 217 static struct evcnt entropy_extract_fail_evcnt = 218 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail"); 219 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt); 220 static struct evcnt entropy_request_evcnt = 221 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request"); 222 EVCNT_ATTACH_STATIC(entropy_request_evcnt); 223 static struct evcnt entropy_deplete_evcnt = 224 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete"); 225 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt); 226 static struct evcnt entropy_notify_evcnt = 227 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify"); 228 EVCNT_ATTACH_STATIC(entropy_notify_evcnt); 229 230 /* Sysctl knobs */ 231 static bool entropy_collection = 1; 232 static bool entropy_depletion = 0; /* Silly! 
*/ 233 234 static const struct sysctlnode *entropy_sysctlroot; 235 static struct sysctllog *entropy_sysctllog; 236 237 /* Forward declarations */ 238 static void entropy_init_cpu(void *, void *, struct cpu_info *); 239 static void entropy_fini_cpu(void *, void *, struct cpu_info *); 240 static void entropy_account_cpu(struct entropy_cpu *); 241 static void entropy_enter(const void *, size_t, unsigned); 242 static bool entropy_enter_intr(const void *, size_t, unsigned); 243 static void entropy_softintr(void *); 244 static void entropy_thread(void *); 245 static uint32_t entropy_pending(void); 246 static void entropy_pending_cpu(void *, void *, struct cpu_info *); 247 static void entropy_do_consolidate(void); 248 static void entropy_consolidate_xc(void *, void *); 249 static void entropy_notify(void); 250 static int sysctl_entropy_consolidate(SYSCTLFN_ARGS); 251 static int sysctl_entropy_gather(SYSCTLFN_ARGS); 252 static void filt_entropy_read_detach(struct knote *); 253 static int filt_entropy_read_event(struct knote *, long); 254 static void entropy_request(size_t); 255 static void rnd_add_data_1(struct krndsource *, const void *, uint32_t, 256 uint32_t, uint32_t); 257 static unsigned rndsource_entropybits(struct krndsource *); 258 static void rndsource_entropybits_cpu(void *, void *, struct cpu_info *); 259 static void rndsource_to_user(struct krndsource *, rndsource_t *); 260 static void rndsource_to_user_est(struct krndsource *, rndsource_est_t *); 261 static void rndsource_to_user_est_cpu(void *, void *, struct cpu_info *); 262 263 /* 264 * entropy_timer() 265 * 266 * Cycle counter, time counter, or anything that changes a wee bit 267 * unpredictably. 268 */ 269 static inline uint32_t 270 entropy_timer(void) 271 { 272 struct bintime bt; 273 uint32_t v; 274 275 /* If we have a CPU cycle counter, use the low 32 bits. */ 276 #ifdef __HAVE_CPU_COUNTER 277 if (__predict_true(cpu_hascounter())) 278 return cpu_counter32(); 279 #endif /* __HAVE_CPU_COUNTER */ 280 281 /* If we're cold, tough. Can't binuptime while cold. */ 282 if (__predict_false(cold)) 283 return 0; 284 285 /* Fold the 128 bits of binuptime into 32 bits. */ 286 binuptime(&bt); 287 v = bt.frac; 288 v ^= bt.frac >> 32; 289 v ^= bt.sec; 290 v ^= bt.sec >> 32; 291 return v; 292 } 293 294 static void 295 attach_seed_rndsource(void) 296 { 297 298 /* 299 * First called no later than entropy_init, while we are still 300 * single-threaded, so no need for RUN_ONCE. 301 */ 302 if (E->stage >= ENTROPY_WARM || E->seed_rndsource) 303 return; 304 rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN, 305 RND_FLAG_COLLECT_VALUE); 306 E->seed_rndsource = true; 307 } 308 309 /* 310 * entropy_init() 311 * 312 * Initialize the entropy subsystem. Panic on failure. 313 * 314 * Requires percpu(9) and sysctl(9) to be initialized. 315 */ 316 static void 317 entropy_init(void) 318 { 319 uint32_t extra[2]; 320 struct krndsource *rs; 321 unsigned i = 0; 322 323 KASSERT(E->stage == ENTROPY_COLD); 324 325 /* Grab some cycle counts early at boot. */ 326 extra[i++] = entropy_timer(); 327 328 /* Run the entropy pool cryptography self-test. */ 329 if (entpool_selftest() == -1) 330 panic("entropy pool crypto self-test failed"); 331 332 /* Create the sysctl directory. */ 333 sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot, 334 CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy", 335 SYSCTL_DESCR("Entropy (random number sources) options"), 336 NULL, 0, NULL, 0, 337 CTL_KERN, CTL_CREATE, CTL_EOL); 338 339 /* Create the sysctl knobs. 
*/ 340 /* XXX These shouldn't be writable at securelevel>0. */ 341 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 342 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection", 343 SYSCTL_DESCR("Automatically collect entropy from hardware"), 344 NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL); 345 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 346 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion", 347 SYSCTL_DESCR("`Deplete' entropy pool when observed"), 348 NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL); 349 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 350 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate", 351 SYSCTL_DESCR("Trigger entropy consolidation now"), 352 sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL); 353 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 354 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather", 355 SYSCTL_DESCR("Trigger entropy gathering from sources now"), 356 sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL); 357 /* XXX These should maybe not be readable at securelevel>0. */ 358 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 359 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 360 "needed", SYSCTL_DESCR("Systemwide entropy deficit"), 361 NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL); 362 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 363 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 364 "pending", SYSCTL_DESCR("Entropy pending on CPUs"), 365 NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL); 366 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 367 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 368 "epoch", SYSCTL_DESCR("Entropy epoch"), 369 NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL); 370 371 /* Initialize the global state for multithreaded operation. */ 372 mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM); 373 cv_init(&E->cv, "entropy"); 374 selinit(&E->selq); 375 cv_init(&E->sourcelock_cv, "entsrclock"); 376 377 /* Make sure the seed source is attached. */ 378 attach_seed_rndsource(); 379 380 /* Note if the bootloader didn't provide a seed. */ 381 if (!E->seeded) 382 aprint_debug("entropy: no seed from bootloader\n"); 383 384 /* Allocate the per-CPU records for all early entropy sources. */ 385 LIST_FOREACH(rs, &E->sources, list) 386 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 387 388 /* Enter the boot cycle count to get started. */ 389 extra[i++] = entropy_timer(); 390 KASSERT(i == __arraycount(extra)); 391 entropy_enter(extra, sizeof extra, 0); 392 explicit_memset(extra, 0, sizeof extra); 393 394 /* We are now ready for multi-threaded operation. */ 395 E->stage = ENTROPY_WARM; 396 } 397 398 /* 399 * entropy_init_late() 400 * 401 * Late initialization. Panic on failure. 402 * 403 * Requires CPUs to have been detected and LWPs to have started. 404 */ 405 static void 406 entropy_init_late(void) 407 { 408 int error; 409 410 KASSERT(E->stage == ENTROPY_WARM); 411 412 /* Allocate and initialize the per-CPU state. */ 413 entropy_percpu = percpu_create(sizeof(struct entropy_cpu), 414 entropy_init_cpu, entropy_fini_cpu, NULL); 415 416 /* 417 * Establish the softint at the highest softint priority level. 418 * Must happen after CPU detection. 
419 */ 420 entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, 421 &entropy_softintr, NULL); 422 if (entropy_sih == NULL) 423 panic("unable to establish entropy softint"); 424 425 /* 426 * Create the entropy housekeeping thread. Must happen after 427 * lwpinit. 428 */ 429 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL, 430 entropy_thread, NULL, &entropy_lwp, "entbutler"); 431 if (error) 432 panic("unable to create entropy housekeeping thread: %d", 433 error); 434 435 /* 436 * Wait until the per-CPU initialization has hit all CPUs 437 * before proceeding to mark the entropy system hot. 438 */ 439 xc_barrier(XC_HIGHPRI); 440 E->stage = ENTROPY_HOT; 441 } 442 443 /* 444 * entropy_init_cpu(ptr, cookie, ci) 445 * 446 * percpu(9) constructor for per-CPU entropy pool. 447 */ 448 static void 449 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci) 450 { 451 struct entropy_cpu *ec = ptr; 452 453 ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt), 454 KM_SLEEP); 455 ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP); 456 ec->ec_pending = 0; 457 ec->ec_locked = false; 458 459 evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL, 460 ci->ci_cpuname, "entropy softint"); 461 } 462 463 /* 464 * entropy_fini_cpu(ptr, cookie, ci) 465 * 466 * percpu(9) destructor for per-CPU entropy pool. 467 */ 468 static void 469 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci) 470 { 471 struct entropy_cpu *ec = ptr; 472 473 /* 474 * Zero any lingering data. Disclosure of the per-CPU pool 475 * shouldn't retroactively affect the security of any keys 476 * generated, because entpool(9) erases whatever we have just 477 * drawn out of any pool, but better safe than sorry. 478 */ 479 explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool)); 480 481 evcnt_detach(ec->ec_softint_evcnt); 482 483 kmem_free(ec->ec_pool, sizeof(*ec->ec_pool)); 484 kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt)); 485 } 486 487 /* 488 * entropy_seed(seed) 489 * 490 * Seed the entropy pool with seed. Meant to be called as early 491 * as possible by the bootloader; may be called before or after 492 * entropy_init. Must be called before system reaches userland. 493 * Must be called in thread or soft interrupt context, not in hard 494 * interrupt context. Must be called at most once. 495 * 496 * Overwrites the seed in place. Caller may then free the memory. 497 */ 498 static void 499 entropy_seed(rndsave_t *seed) 500 { 501 SHA1_CTX ctx; 502 uint8_t digest[SHA1_DIGEST_LENGTH]; 503 bool seeded; 504 505 /* 506 * Verify the checksum. If the checksum fails, take the data 507 * but ignore the entropy estimate -- the file may have been 508 * incompletely written with garbage, which is harmless to add 509 * but may not be as unpredictable as alleged. 510 */ 511 SHA1Init(&ctx); 512 SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy)); 513 SHA1Update(&ctx, seed->data, sizeof(seed->data)); 514 SHA1Final(digest, &ctx); 515 CTASSERT(sizeof(seed->digest) == sizeof(digest)); 516 if (!consttime_memequal(digest, seed->digest, sizeof(digest))) { 517 printf("entropy: invalid seed checksum\n"); 518 seed->entropy = 0; 519 } 520 explicit_memset(&ctx, 0, sizeof ctx); 521 explicit_memset(digest, 0, sizeof digest); 522 523 /* 524 * If the entropy is insensibly large, try byte-swapping. 525 * Otherwise assume the file is corrupted and act as though it 526 * has zero entropy. 
527 */ 528 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) { 529 seed->entropy = bswap32(seed->entropy); 530 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) 531 seed->entropy = 0; 532 } 533 534 /* Make sure the seed source is attached. */ 535 attach_seed_rndsource(); 536 537 /* Test and set E->seeded. */ 538 if (E->stage >= ENTROPY_WARM) 539 mutex_enter(&E->lock); 540 seeded = E->seeded; 541 E->seeded = (seed->entropy > 0); 542 if (E->stage >= ENTROPY_WARM) 543 mutex_exit(&E->lock); 544 545 /* 546 * If we've been seeded, may be re-entering the same seed 547 * (e.g., bootloader vs module init, or something). No harm in 548 * entering it twice, but it contributes no additional entropy. 549 */ 550 if (seeded) { 551 printf("entropy: double-seeded by bootloader\n"); 552 seed->entropy = 0; 553 } else { 554 printf("entropy: entering seed from bootloader" 555 " with %u bits of entropy\n", (unsigned)seed->entropy); 556 } 557 558 /* Enter it into the pool and promptly zero it. */ 559 rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data), 560 seed->entropy); 561 explicit_memset(seed, 0, sizeof(*seed)); 562 } 563 564 /* 565 * entropy_bootrequest() 566 * 567 * Request entropy from all sources at boot, once config is 568 * complete and interrupts are running. 569 */ 570 void 571 entropy_bootrequest(void) 572 { 573 574 KASSERT(E->stage >= ENTROPY_WARM); 575 576 /* 577 * Request enough to satisfy the maximum entropy shortage. 578 * This is harmless overkill if the bootloader provided a seed. 579 */ 580 mutex_enter(&E->lock); 581 entropy_request(ENTROPY_CAPACITY); 582 mutex_exit(&E->lock); 583 } 584 585 /* 586 * entropy_epoch() 587 * 588 * Returns the current entropy epoch. If this changes, you should 589 * reseed. If -1, means system entropy has not yet reached full 590 * entropy or been explicitly consolidated; never reverts back to 591 * -1. Never zero, so you can always use zero as an uninitialized 592 * sentinel value meaning `reseed ASAP'. 593 * 594 * Usage model: 595 * 596 * struct foo { 597 * struct crypto_prng prng; 598 * unsigned epoch; 599 * } *foo; 600 * 601 * unsigned epoch = entropy_epoch(); 602 * if (__predict_false(epoch != foo->epoch)) { 603 * uint8_t seed[32]; 604 * if (entropy_extract(seed, sizeof seed, 0) != 0) 605 * warn("no entropy"); 606 * crypto_prng_reseed(&foo->prng, seed, sizeof seed); 607 * foo->epoch = epoch; 608 * } 609 */ 610 unsigned 611 entropy_epoch(void) 612 { 613 614 /* 615 * Unsigned int, so no need for seqlock for an atomic read, but 616 * make sure we read it afresh each time. 617 */ 618 return atomic_load_relaxed(&E->epoch); 619 } 620 621 /* 622 * entropy_ready() 623 * 624 * True if the entropy pool has full entropy. 625 */ 626 bool 627 entropy_ready(void) 628 { 629 630 return atomic_load_relaxed(&E->needed) == 0; 631 } 632 633 /* 634 * entropy_account_cpu(ec) 635 * 636 * Consider whether to consolidate entropy into the global pool 637 * after we just added some into the current CPU's pending pool. 638 * 639 * - If this CPU can provide enough entropy now, do so. 640 * 641 * - If this and whatever else is available on other CPUs can 642 * provide enough entropy, kick the consolidation thread. 643 * 644 * - Otherwise, do as little as possible, except maybe consolidate 645 * entropy at most once a minute. 646 * 647 * Caller must be bound to a CPU and therefore have exclusive 648 * access to ec. Will acquire and release the global lock. 
649 */ 650 static void 651 entropy_account_cpu(struct entropy_cpu *ec) 652 { 653 unsigned diff; 654 655 KASSERT(E->stage == ENTROPY_HOT); 656 657 /* 658 * If there's no entropy needed, and entropy has been 659 * consolidated in the last minute, do nothing. 660 */ 661 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 662 __predict_true(!atomic_load_relaxed(&entropy_depletion)) && 663 __predict_true((time_uptime - E->timestamp) <= 60)) 664 return; 665 666 /* If there's nothing pending, stop here. */ 667 if (ec->ec_pending == 0) 668 return; 669 670 /* Consider consolidation, under the lock. */ 671 mutex_enter(&E->lock); 672 if (E->needed != 0 && E->needed <= ec->ec_pending) { 673 /* 674 * If we have not yet attained full entropy but we can 675 * now, do so. This way we disseminate entropy 676 * promptly when it becomes available early at boot; 677 * otherwise we leave it to the entropy consolidation 678 * thread, which is rate-limited to mitigate side 679 * channels and abuse. 680 */ 681 uint8_t buf[ENTPOOL_CAPACITY]; 682 683 /* Transfer from the local pool to the global pool. */ 684 entpool_extract(ec->ec_pool, buf, sizeof buf); 685 entpool_enter(&E->pool, buf, sizeof buf); 686 atomic_store_relaxed(&ec->ec_pending, 0); 687 atomic_store_relaxed(&E->needed, 0); 688 689 /* Notify waiters that we now have full entropy. */ 690 entropy_notify(); 691 entropy_immediate_evcnt.ev_count++; 692 } else { 693 /* Record how much we can add to the global pool. */ 694 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending); 695 E->pending += diff; 696 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff); 697 698 /* 699 * This should have made a difference unless we were 700 * already saturated. 701 */ 702 KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY); 703 KASSERT(E->pending); 704 705 if (E->needed <= E->pending) { 706 /* 707 * Enough entropy between all the per-CPU 708 * pools. Wake up the housekeeping thread. 709 * 710 * If we don't need any entropy, this doesn't 711 * mean much, but it is the only time we ever 712 * gather additional entropy in case the 713 * accounting has been overly optimistic. This 714 * happens at most once a minute, so there's 715 * negligible performance cost. 716 */ 717 E->consolidate = true; 718 cv_broadcast(&E->cv); 719 if (E->needed == 0) 720 entropy_discretionary_evcnt.ev_count++; 721 } else { 722 /* Can't get full entropy. Keep gathering. */ 723 entropy_partial_evcnt.ev_count++; 724 } 725 } 726 mutex_exit(&E->lock); 727 } 728 729 /* 730 * entropy_enter_early(buf, len, nbits) 731 * 732 * Do entropy bookkeeping globally, before we have established 733 * per-CPU pools. Enter directly into the global pool in the hope 734 * that we enter enough before the first entropy_extract to thwart 735 * iterative-guessing attacks; entropy_extract will warn if not. 736 */ 737 static void 738 entropy_enter_early(const void *buf, size_t len, unsigned nbits) 739 { 740 bool notify = false; 741 742 if (E->stage >= ENTROPY_WARM) 743 mutex_enter(&E->lock); 744 745 /* Enter it into the pool. */ 746 entpool_enter(&E->pool, buf, len); 747 748 /* 749 * Decide whether to notify reseed -- we will do so if either: 750 * (a) we transition from partial entropy to full entropy, or 751 * (b) we get a batch of full entropy all at once. 752 */ 753 notify |= (E->needed && E->needed <= nbits); 754 notify |= (nbits >= ENTROPY_CAPACITY*NBBY); 755 756 /* Subtract from the needed count and notify if appropriate. 
*/ 757 E->needed -= MIN(E->needed, nbits); 758 if (notify) { 759 entropy_notify(); 760 entropy_immediate_evcnt.ev_count++; 761 } 762 763 if (E->stage >= ENTROPY_WARM) 764 mutex_exit(&E->lock); 765 } 766 767 /* 768 * entropy_enter(buf, len, nbits) 769 * 770 * Enter len bytes of data from buf into the system's entropy 771 * pool, stirring as necessary when the internal buffer fills up. 772 * nbits is a lower bound on the number of bits of entropy in the 773 * process that led to this sample. 774 */ 775 static void 776 entropy_enter(const void *buf, size_t len, unsigned nbits) 777 { 778 struct entropy_cpu *ec; 779 uint32_t pending; 780 int s; 781 782 KASSERTMSG(!cpu_intr_p(), 783 "use entropy_enter_intr from interrupt context"); 784 KASSERTMSG(howmany(nbits, NBBY) <= len, 785 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 786 787 /* If it's too early after boot, just use entropy_enter_early. */ 788 if (__predict_false(E->stage < ENTROPY_HOT)) { 789 entropy_enter_early(buf, len, nbits); 790 return; 791 } 792 793 /* 794 * Acquire the per-CPU state, blocking soft interrupts and 795 * causing hard interrupts to drop samples on the floor. 796 */ 797 ec = percpu_getref(entropy_percpu); 798 s = splsoftserial(); 799 KASSERT(!ec->ec_locked); 800 ec->ec_locked = true; 801 __insn_barrier(); 802 803 /* Enter into the per-CPU pool. */ 804 entpool_enter(ec->ec_pool, buf, len); 805 806 /* Count up what we can add. */ 807 pending = ec->ec_pending; 808 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 809 atomic_store_relaxed(&ec->ec_pending, pending); 810 811 /* Consolidate globally if appropriate based on what we added. */ 812 entropy_account_cpu(ec); 813 814 /* Release the per-CPU state. */ 815 KASSERT(ec->ec_locked); 816 __insn_barrier(); 817 ec->ec_locked = false; 818 splx(s); 819 percpu_putref(entropy_percpu); 820 } 821 822 /* 823 * entropy_enter_intr(buf, len, nbits) 824 * 825 * Enter up to len bytes of data from buf into the system's 826 * entropy pool without stirring. nbits is a lower bound on the 827 * number of bits of entropy in the process that led to this 828 * sample. If the sample could be entered completely, assume 829 * nbits of entropy pending; otherwise assume none, since we don't 830 * know whether some parts of the sample are constant, for 831 * instance. Schedule a softint to stir the entropy pool if 832 * needed. Return true if used fully, false if truncated at all. 833 * 834 * Using this in thread context will work, but you might as well 835 * use entropy_enter in that case. 836 */ 837 static bool 838 entropy_enter_intr(const void *buf, size_t len, unsigned nbits) 839 { 840 struct entropy_cpu *ec; 841 bool fullyused = false; 842 uint32_t pending; 843 844 KASSERTMSG(howmany(nbits, NBBY) <= len, 845 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 846 847 /* If it's too early after boot, just use entropy_enter_early. */ 848 if (__predict_false(E->stage < ENTROPY_HOT)) { 849 entropy_enter_early(buf, len, nbits); 850 return true; 851 } 852 853 /* 854 * Acquire the per-CPU state. If someone is in the middle of 855 * using it, drop the sample. Otherwise, take the lock so that 856 * higher-priority interrupts will drop their samples. 857 */ 858 ec = percpu_getref(entropy_percpu); 859 if (ec->ec_locked) 860 goto out0; 861 ec->ec_locked = true; 862 __insn_barrier(); 863 864 /* 865 * Enter as much as we can into the per-CPU pool. If it was 866 * truncated, schedule a softint to stir the pool and stop. 
867 */ 868 if (!entpool_enter_nostir(ec->ec_pool, buf, len)) { 869 softint_schedule(entropy_sih); 870 goto out1; 871 } 872 fullyused = true; 873 874 /* Count up what we can contribute. */ 875 pending = ec->ec_pending; 876 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 877 atomic_store_relaxed(&ec->ec_pending, pending); 878 879 /* Schedule a softint if we added anything and it matters. */ 880 if (__predict_false((atomic_load_relaxed(&E->needed) != 0) || 881 atomic_load_relaxed(&entropy_depletion)) && 882 nbits != 0) 883 softint_schedule(entropy_sih); 884 885 out1: /* Release the per-CPU state. */ 886 KASSERT(ec->ec_locked); 887 __insn_barrier(); 888 ec->ec_locked = false; 889 out0: percpu_putref(entropy_percpu); 890 891 return fullyused; 892 } 893 894 /* 895 * entropy_softintr(cookie) 896 * 897 * Soft interrupt handler for entering entropy. Takes care of 898 * stirring the local CPU's entropy pool if it filled up during 899 * hard interrupts, and promptly crediting entropy from the local 900 * CPU's entropy pool to the global entropy pool if needed. 901 */ 902 static void 903 entropy_softintr(void *cookie) 904 { 905 struct entropy_cpu *ec; 906 907 /* 908 * Acquire the per-CPU state. Other users can lock this only 909 * while soft interrupts are blocked. Cause hard interrupts to 910 * drop samples on the floor. 911 */ 912 ec = percpu_getref(entropy_percpu); 913 KASSERT(!ec->ec_locked); 914 ec->ec_locked = true; 915 __insn_barrier(); 916 917 /* Count statistics. */ 918 ec->ec_softint_evcnt->ev_count++; 919 920 /* Stir the pool if necessary. */ 921 entpool_stir(ec->ec_pool); 922 923 /* Consolidate globally if appropriate based on what we added. */ 924 entropy_account_cpu(ec); 925 926 /* Release the per-CPU state. */ 927 KASSERT(ec->ec_locked); 928 __insn_barrier(); 929 ec->ec_locked = false; 930 percpu_putref(entropy_percpu); 931 } 932 933 /* 934 * entropy_thread(cookie) 935 * 936 * Handle any asynchronous entropy housekeeping. 937 */ 938 static void 939 entropy_thread(void *cookie) 940 { 941 bool consolidate; 942 943 for (;;) { 944 /* 945 * Wait until there's full entropy somewhere among the 946 * CPUs, as confirmed at most once per minute, or 947 * someone wants to consolidate. 948 */ 949 if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) { 950 consolidate = true; 951 } else { 952 mutex_enter(&E->lock); 953 if (!E->consolidate) 954 cv_timedwait(&E->cv, &E->lock, 60*hz); 955 consolidate = E->consolidate; 956 E->consolidate = false; 957 mutex_exit(&E->lock); 958 } 959 960 if (consolidate) { 961 /* Do it. */ 962 entropy_do_consolidate(); 963 964 /* Mitigate abuse. */ 965 kpause("entropy", false, hz, NULL); 966 } 967 } 968 } 969 970 /* 971 * entropy_pending() 972 * 973 * Count up the amount of entropy pending on other CPUs. 974 */ 975 static uint32_t 976 entropy_pending(void) 977 { 978 uint32_t pending = 0; 979 980 percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending); 981 return pending; 982 } 983 984 static void 985 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci) 986 { 987 struct entropy_cpu *ec = ptr; 988 uint32_t *pendingp = cookie; 989 uint32_t cpu_pending; 990 991 cpu_pending = atomic_load_relaxed(&ec->ec_pending); 992 *pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending); 993 } 994 995 /* 996 * entropy_do_consolidate() 997 * 998 * Issue a cross-call to gather entropy on all CPUs and advance 999 * the entropy epoch. 
1000 */ 1001 static void 1002 entropy_do_consolidate(void) 1003 { 1004 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1005 static struct timeval lasttime; /* serialized by E->lock */ 1006 struct entpool pool; 1007 uint8_t buf[ENTPOOL_CAPACITY]; 1008 unsigned diff; 1009 uint64_t ticket; 1010 1011 /* Gather entropy on all CPUs into a temporary pool. */ 1012 memset(&pool, 0, sizeof pool); 1013 ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL); 1014 xc_wait(ticket); 1015 1016 /* Acquire the lock to notify waiters. */ 1017 mutex_enter(&E->lock); 1018 1019 /* Count another consolidation. */ 1020 entropy_consolidate_evcnt.ev_count++; 1021 1022 /* Note when we last consolidated, i.e. now. */ 1023 E->timestamp = time_uptime; 1024 1025 /* Mix what we gathered into the global pool. */ 1026 entpool_extract(&pool, buf, sizeof buf); 1027 entpool_enter(&E->pool, buf, sizeof buf); 1028 explicit_memset(&pool, 0, sizeof pool); 1029 1030 /* Count the entropy that was gathered. */ 1031 diff = MIN(E->needed, E->pending); 1032 atomic_store_relaxed(&E->needed, E->needed - diff); 1033 E->pending -= diff; 1034 if (__predict_false(E->needed > 0)) { 1035 if (ratecheck(&lasttime, &interval) && 1036 (boothowto & AB_DEBUG) != 0) { 1037 printf("entropy: WARNING:" 1038 " consolidating less than full entropy\n"); 1039 } 1040 } 1041 1042 /* Advance the epoch and notify waiters. */ 1043 entropy_notify(); 1044 1045 /* Release the lock. */ 1046 mutex_exit(&E->lock); 1047 } 1048 1049 /* 1050 * entropy_consolidate_xc(vpool, arg2) 1051 * 1052 * Extract output from the local CPU's input pool and enter it 1053 * into a temporary pool passed as vpool. 1054 */ 1055 static void 1056 entropy_consolidate_xc(void *vpool, void *arg2 __unused) 1057 { 1058 struct entpool *pool = vpool; 1059 struct entropy_cpu *ec; 1060 uint8_t buf[ENTPOOL_CAPACITY]; 1061 uint32_t extra[7]; 1062 unsigned i = 0; 1063 int s; 1064 1065 /* Grab CPU number and cycle counter to mix extra into the pool. */ 1066 extra[i++] = cpu_number(); 1067 extra[i++] = entropy_timer(); 1068 1069 /* 1070 * Acquire the per-CPU state, blocking soft interrupts and 1071 * discarding entropy in hard interrupts, so that we can 1072 * extract from the per-CPU pool. 1073 */ 1074 ec = percpu_getref(entropy_percpu); 1075 s = splsoftserial(); 1076 KASSERT(!ec->ec_locked); 1077 ec->ec_locked = true; 1078 __insn_barrier(); 1079 extra[i++] = entropy_timer(); 1080 1081 /* Extract the data and count it no longer pending. */ 1082 entpool_extract(ec->ec_pool, buf, sizeof buf); 1083 atomic_store_relaxed(&ec->ec_pending, 0); 1084 extra[i++] = entropy_timer(); 1085 1086 /* Release the per-CPU state. */ 1087 KASSERT(ec->ec_locked); 1088 __insn_barrier(); 1089 ec->ec_locked = false; 1090 splx(s); 1091 percpu_putref(entropy_percpu); 1092 extra[i++] = entropy_timer(); 1093 1094 /* 1095 * Copy over statistics, and enter the per-CPU extract and the 1096 * extra timing into the temporary pool, under the global lock. 1097 */ 1098 mutex_enter(&E->lock); 1099 extra[i++] = entropy_timer(); 1100 entpool_enter(pool, buf, sizeof buf); 1101 explicit_memset(buf, 0, sizeof buf); 1102 extra[i++] = entropy_timer(); 1103 KASSERT(i == __arraycount(extra)); 1104 entpool_enter(pool, extra, sizeof extra); 1105 explicit_memset(extra, 0, sizeof extra); 1106 mutex_exit(&E->lock); 1107 } 1108 1109 /* 1110 * entropy_notify() 1111 * 1112 * Caller just contributed entropy to the global pool. Advance 1113 * the entropy epoch and notify waiters. 
1114 * 1115 * Caller must hold the global entropy lock. Except for the 1116 * `sysctl -w kern.entropy.consolidate=1` trigger, the caller must 1117 * have just have transitioned from partial entropy to full 1118 * entropy -- E->needed should be zero now. 1119 */ 1120 static void 1121 entropy_notify(void) 1122 { 1123 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1124 static struct timeval lasttime; /* serialized by E->lock */ 1125 unsigned epoch; 1126 1127 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1128 1129 /* 1130 * If this is the first time, print a message to the console 1131 * that we're ready so operators can compare it to the timing 1132 * of other events. 1133 */ 1134 if (__predict_false(!rnd_initial_entropy) && E->needed == 0) { 1135 printf("entropy: ready\n"); 1136 rnd_initial_entropy = 1; 1137 } 1138 1139 /* Set the epoch; roll over from UINTMAX-1 to 1. */ 1140 if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) || 1141 ratecheck(&lasttime, &interval)) { 1142 epoch = E->epoch + 1; 1143 if (epoch == 0 || epoch == (unsigned)-1) 1144 epoch = 1; 1145 atomic_store_relaxed(&E->epoch, epoch); 1146 } 1147 1148 /* Notify waiters. */ 1149 if (E->stage >= ENTROPY_WARM) { 1150 cv_broadcast(&E->cv); 1151 selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT); 1152 } 1153 1154 /* Count another notification. */ 1155 entropy_notify_evcnt.ev_count++; 1156 } 1157 1158 /* 1159 * entropy_consolidate() 1160 * 1161 * Trigger entropy consolidation and wait for it to complete. 1162 * 1163 * This should be used sparingly, not periodically -- requiring 1164 * conscious intervention by the operator or a clear policy 1165 * decision. Otherwise, the kernel will automatically consolidate 1166 * when enough entropy has been gathered into per-CPU pools to 1167 * transition to full entropy. 1168 */ 1169 void 1170 entropy_consolidate(void) 1171 { 1172 uint64_t ticket; 1173 int error; 1174 1175 KASSERT(E->stage == ENTROPY_HOT); 1176 1177 mutex_enter(&E->lock); 1178 ticket = entropy_consolidate_evcnt.ev_count; 1179 E->consolidate = true; 1180 cv_broadcast(&E->cv); 1181 while (ticket == entropy_consolidate_evcnt.ev_count) { 1182 error = cv_wait_sig(&E->cv, &E->lock); 1183 if (error) 1184 break; 1185 } 1186 mutex_exit(&E->lock); 1187 } 1188 1189 /* 1190 * sysctl -w kern.entropy.consolidate=1 1191 * 1192 * Trigger entropy consolidation and wait for it to complete. 1193 * Writable only by superuser. This, writing to /dev/random, and 1194 * ioctl(RNDADDDATA) are the only ways for the system to 1195 * consolidate entropy if the operator knows something the kernel 1196 * doesn't about how unpredictable the pending entropy pools are. 1197 */ 1198 static int 1199 sysctl_entropy_consolidate(SYSCTLFN_ARGS) 1200 { 1201 struct sysctlnode node = *rnode; 1202 int arg; 1203 int error; 1204 1205 KASSERT(E->stage == ENTROPY_HOT); 1206 1207 node.sysctl_data = &arg; 1208 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1209 if (error || newp == NULL) 1210 return error; 1211 if (arg) 1212 entropy_consolidate(); 1213 1214 return error; 1215 } 1216 1217 /* 1218 * sysctl -w kern.entropy.gather=1 1219 * 1220 * Trigger gathering entropy from all on-demand sources, and wait 1221 * for synchronous sources (but not asynchronous sources) to 1222 * complete. Writable only by superuser. 
1223 */ 1224 static int 1225 sysctl_entropy_gather(SYSCTLFN_ARGS) 1226 { 1227 struct sysctlnode node = *rnode; 1228 int arg; 1229 int error; 1230 1231 KASSERT(E->stage == ENTROPY_HOT); 1232 1233 node.sysctl_data = &arg; 1234 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1235 if (error || newp == NULL) 1236 return error; 1237 if (arg) { 1238 mutex_enter(&E->lock); 1239 entropy_request(ENTROPY_CAPACITY); 1240 mutex_exit(&E->lock); 1241 } 1242 1243 return 0; 1244 } 1245 1246 /* 1247 * entropy_extract(buf, len, flags) 1248 * 1249 * Extract len bytes from the global entropy pool into buf. 1250 * 1251 * Flags may have: 1252 * 1253 * ENTROPY_WAIT Wait for entropy if not available yet. 1254 * ENTROPY_SIG Allow interruption by a signal during wait. 1255 * ENTROPY_HARDFAIL Either fill the buffer with full entropy, 1256 * or fail without filling it at all. 1257 * 1258 * Return zero on success, or error on failure: 1259 * 1260 * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 1261 * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 1262 * 1263 * If ENTROPY_WAIT is set, allowed only in thread context. If 1264 * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's 1265 * awfully high... Do we really need it in hard interrupts? This 1266 * arises from use of cprng_strong(9).) 1267 */ 1268 int 1269 entropy_extract(void *buf, size_t len, int flags) 1270 { 1271 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1272 static struct timeval lasttime; /* serialized by E->lock */ 1273 int error; 1274 1275 if (ISSET(flags, ENTROPY_WAIT)) { 1276 ASSERT_SLEEPABLE(); 1277 KASSERTMSG(E->stage >= ENTROPY_WARM, 1278 "can't wait for entropy until warm"); 1279 } 1280 1281 /* Acquire the global lock to get at the global pool. */ 1282 if (E->stage >= ENTROPY_WARM) 1283 mutex_enter(&E->lock); 1284 1285 /* Count up request for entropy in interrupt context. */ 1286 if (cpu_intr_p()) 1287 entropy_extract_intr_evcnt.ev_count++; 1288 1289 /* Wait until there is enough entropy in the system. */ 1290 error = 0; 1291 while (E->needed) { 1292 /* Ask for more, synchronously if possible. */ 1293 entropy_request(len); 1294 1295 /* If we got enough, we're done. */ 1296 if (E->needed == 0) { 1297 KASSERT(error == 0); 1298 break; 1299 } 1300 1301 /* If not waiting, stop here. */ 1302 if (!ISSET(flags, ENTROPY_WAIT)) { 1303 error = EWOULDBLOCK; 1304 break; 1305 } 1306 1307 /* Wait for some entropy to come in and try again. */ 1308 KASSERT(E->stage >= ENTROPY_WARM); 1309 printf("entropy: pid %d (%s) blocking due to lack of entropy\n", 1310 curproc->p_pid, curproc->p_comm); 1311 1312 if (ISSET(flags, ENTROPY_SIG)) { 1313 error = cv_wait_sig(&E->cv, &E->lock); 1314 if (error) 1315 break; 1316 } else { 1317 cv_wait(&E->cv, &E->lock); 1318 } 1319 } 1320 1321 /* 1322 * Count failure -- but fill the buffer nevertheless, unless 1323 * the caller specified ENTROPY_HARDFAIL. 1324 */ 1325 if (error) { 1326 if (ISSET(flags, ENTROPY_HARDFAIL)) 1327 goto out; 1328 entropy_extract_fail_evcnt.ev_count++; 1329 } 1330 1331 /* 1332 * Report a warning if we have never yet reached full entropy. 1333 * This is the only case where we consider entropy to be 1334 * `depleted' without kern.entropy.depletion enabled -- when we 1335 * only have partial entropy, an adversary may be able to 1336 * narrow the state of the pool down to a small number of 1337 * possibilities; the output then enables them to confirm a 1338 * guess, reducing its entropy from the adversary's perspective 1339 * to zero. 
1340 */ 1341 if (__predict_false(E->epoch == (unsigned)-1)) { 1342 if (ratecheck(&lasttime, &interval)) 1343 printf("entropy: WARNING:" 1344 " extracting entropy too early\n"); 1345 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY); 1346 } 1347 1348 /* Extract data from the pool, and `deplete' if we're doing that. */ 1349 entpool_extract(&E->pool, buf, len); 1350 if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 1351 error == 0) { 1352 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 1353 1354 atomic_store_relaxed(&E->needed, 1355 E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost)); 1356 entropy_deplete_evcnt.ev_count++; 1357 } 1358 1359 out: /* Release the global lock and return the error. */ 1360 if (E->stage >= ENTROPY_WARM) 1361 mutex_exit(&E->lock); 1362 return error; 1363 } 1364 1365 /* 1366 * entropy_poll(events) 1367 * 1368 * Return the subset of events ready, and if it is not all of 1369 * events, record curlwp as waiting for entropy. 1370 */ 1371 int 1372 entropy_poll(int events) 1373 { 1374 int revents = 0; 1375 1376 KASSERT(E->stage >= ENTROPY_WARM); 1377 1378 /* Always ready for writing. */ 1379 revents |= events & (POLLOUT|POLLWRNORM); 1380 1381 /* Narrow it down to reads. */ 1382 events &= POLLIN|POLLRDNORM; 1383 if (events == 0) 1384 return revents; 1385 1386 /* 1387 * If we have reached full entropy and we're not depleting 1388 * entropy, we are forever ready. 1389 */ 1390 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 1391 __predict_true(!atomic_load_relaxed(&entropy_depletion))) 1392 return revents | events; 1393 1394 /* 1395 * Otherwise, check whether we need entropy under the lock. If 1396 * we don't, we're ready; if we do, add ourselves to the queue. 1397 */ 1398 mutex_enter(&E->lock); 1399 if (E->needed == 0) 1400 revents |= events; 1401 else 1402 selrecord(curlwp, &E->selq); 1403 mutex_exit(&E->lock); 1404 1405 return revents; 1406 } 1407 1408 /* 1409 * filt_entropy_read_detach(kn) 1410 * 1411 * struct filterops::f_detach callback for entropy read events: 1412 * remove kn from the list of waiters. 1413 */ 1414 static void 1415 filt_entropy_read_detach(struct knote *kn) 1416 { 1417 1418 KASSERT(E->stage >= ENTROPY_WARM); 1419 1420 mutex_enter(&E->lock); 1421 selremove_knote(&E->selq, kn); 1422 mutex_exit(&E->lock); 1423 } 1424 1425 /* 1426 * filt_entropy_read_event(kn, hint) 1427 * 1428 * struct filterops::f_event callback for entropy read events: 1429 * poll for entropy. Caller must hold the global entropy lock if 1430 * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 1431 */ 1432 static int 1433 filt_entropy_read_event(struct knote *kn, long hint) 1434 { 1435 int ret; 1436 1437 KASSERT(E->stage >= ENTROPY_WARM); 1438 1439 /* Acquire the lock, if caller is outside entropy subsystem. */ 1440 if (hint == NOTE_SUBMIT) 1441 KASSERT(mutex_owned(&E->lock)); 1442 else 1443 mutex_enter(&E->lock); 1444 1445 /* 1446 * If we still need entropy, can't read anything; if not, can 1447 * read arbitrarily much. 1448 */ 1449 if (E->needed != 0) { 1450 ret = 0; 1451 } else { 1452 if (atomic_load_relaxed(&entropy_depletion)) 1453 kn->kn_data = ENTROPY_CAPACITY*NBBY; 1454 else 1455 kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 1456 ret = 1; 1457 } 1458 1459 /* Release the lock, if caller is outside entropy subsystem. 
*/ 1460 if (hint == NOTE_SUBMIT) 1461 KASSERT(mutex_owned(&E->lock)); 1462 else 1463 mutex_exit(&E->lock); 1464 1465 return ret; 1466 } 1467 1468 static const struct filterops entropy_read_filtops = { 1469 .f_isfd = 1, /* XXX Makes sense only for /dev/u?random. */ 1470 .f_attach = NULL, 1471 .f_detach = filt_entropy_read_detach, 1472 .f_event = filt_entropy_read_event, 1473 }; 1474 1475 /* 1476 * entropy_kqfilter(kn) 1477 * 1478 * Register kn to receive entropy event notifications. May be 1479 * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. 1480 */ 1481 int 1482 entropy_kqfilter(struct knote *kn) 1483 { 1484 1485 KASSERT(E->stage >= ENTROPY_WARM); 1486 1487 switch (kn->kn_filter) { 1488 case EVFILT_READ: 1489 /* Enter into the global select queue. */ 1490 mutex_enter(&E->lock); 1491 kn->kn_fop = &entropy_read_filtops; 1492 selrecord_knote(&E->selq, kn); 1493 mutex_exit(&E->lock); 1494 return 0; 1495 case EVFILT_WRITE: 1496 /* Can always dump entropy into the system. */ 1497 kn->kn_fop = &seltrue_filtops; 1498 return 0; 1499 default: 1500 return EINVAL; 1501 } 1502 } 1503 1504 /* 1505 * rndsource_setcb(rs, get, getarg) 1506 * 1507 * Set the request callback for the entropy source rs, if it can 1508 * provide entropy on demand. Must precede rnd_attach_source. 1509 */ 1510 void 1511 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *), 1512 void *getarg) 1513 { 1514 1515 rs->get = get; 1516 rs->getarg = getarg; 1517 } 1518 1519 /* 1520 * rnd_attach_source(rs, name, type, flags) 1521 * 1522 * Attach the entropy source rs. Must be done after 1523 * rndsource_setcb, if any, and before any calls to rnd_add_data. 1524 */ 1525 void 1526 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type, 1527 uint32_t flags) 1528 { 1529 uint32_t extra[4]; 1530 unsigned i = 0; 1531 1532 /* Grab cycle counter to mix extra into the pool. */ 1533 extra[i++] = entropy_timer(); 1534 1535 /* 1536 * Apply some standard flags: 1537 * 1538 * - We do not bother with network devices by default, for 1539 * hysterical raisins (perhaps: because it is often the case 1540 * that an adversary can influence network packet timings). 1541 */ 1542 switch (type) { 1543 case RND_TYPE_NET: 1544 flags |= RND_FLAG_NO_COLLECT; 1545 break; 1546 } 1547 1548 /* Sanity-check the callback if RND_FLAG_HASCB is set. */ 1549 KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL); 1550 1551 /* Initialize the random source. */ 1552 memset(rs->name, 0, sizeof(rs->name)); /* paranoia */ 1553 strlcpy(rs->name, name, sizeof(rs->name)); 1554 memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 1555 memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 1556 rs->total = 0; 1557 rs->type = type; 1558 rs->flags = flags; 1559 if (E->stage >= ENTROPY_WARM) 1560 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 1561 extra[i++] = entropy_timer(); 1562 1563 /* Wire it into the global list of random sources. */ 1564 if (E->stage >= ENTROPY_WARM) 1565 mutex_enter(&E->lock); 1566 LIST_INSERT_HEAD(&E->sources, rs, list); 1567 if (E->stage >= ENTROPY_WARM) 1568 mutex_exit(&E->lock); 1569 extra[i++] = entropy_timer(); 1570 1571 /* Request that it provide entropy ASAP, if we can. */ 1572 if (ISSET(flags, RND_FLAG_HASCB)) 1573 (*rs->get)(ENTROPY_CAPACITY, rs->getarg); 1574 extra[i++] = entropy_timer(); 1575 1576 /* Mix the extra into the pool. 
*/ 1577 KASSERT(i == __arraycount(extra)); 1578 entropy_enter(extra, sizeof extra, 0); 1579 explicit_memset(extra, 0, sizeof extra); 1580 } 1581 1582 /* 1583 * rnd_detach_source(rs) 1584 * 1585 * Detach the entropy source rs. May sleep waiting for users to 1586 * drain. Further use is not allowed. 1587 */ 1588 void 1589 rnd_detach_source(struct krndsource *rs) 1590 { 1591 1592 /* 1593 * If we're cold (shouldn't happen, but hey), just remove it 1594 * from the list -- there's nothing allocated. 1595 */ 1596 if (E->stage == ENTROPY_COLD) { 1597 LIST_REMOVE(rs, list); 1598 return; 1599 } 1600 1601 /* We may have to wait for entropy_request. */ 1602 ASSERT_SLEEPABLE(); 1603 1604 /* Wait until the source list is not in use, and remove it. */ 1605 mutex_enter(&E->lock); 1606 while (E->sourcelock) 1607 cv_wait(&E->sourcelock_cv, &E->lock); 1608 LIST_REMOVE(rs, list); 1609 mutex_exit(&E->lock); 1610 1611 /* Free the per-CPU data. */ 1612 percpu_free(rs->state, sizeof(struct rndsource_cpu)); 1613 } 1614 1615 /* 1616 * rnd_lock_sources() 1617 * 1618 * Prevent changes to the list of rndsources while we iterate it. 1619 * Interruptible. Caller must hold the global entropy lock. If 1620 * successful, no rndsource will go away until rnd_unlock_sources 1621 * even while the caller releases the global entropy lock. 1622 */ 1623 static int 1624 rnd_lock_sources(void) 1625 { 1626 int error; 1627 1628 KASSERT(mutex_owned(&E->lock)); 1629 1630 while (E->sourcelock) { 1631 error = cv_wait_sig(&E->sourcelock_cv, &E->lock); 1632 if (error) 1633 return error; 1634 } 1635 1636 E->sourcelock = curlwp; 1637 return 0; 1638 } 1639 1640 /* 1641 * rnd_trylock_sources() 1642 * 1643 * Try to lock the list of sources, but if it's already locked, 1644 * fail. Caller must hold the global entropy lock. If 1645 * successful, no rndsource will go away until rnd_unlock_sources 1646 * even while the caller releases the global entropy lock. 1647 */ 1648 static bool 1649 rnd_trylock_sources(void) 1650 { 1651 1652 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1653 1654 if (E->sourcelock) 1655 return false; 1656 E->sourcelock = curlwp; 1657 return true; 1658 } 1659 1660 /* 1661 * rnd_unlock_sources() 1662 * 1663 * Unlock the list of sources after rnd_lock_sources or 1664 * rnd_trylock_sources. Caller must hold the global entropy lock. 1665 */ 1666 static void 1667 rnd_unlock_sources(void) 1668 { 1669 1670 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1671 1672 KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p", 1673 curlwp, E->sourcelock); 1674 E->sourcelock = NULL; 1675 if (E->stage >= ENTROPY_WARM) 1676 cv_signal(&E->sourcelock_cv); 1677 } 1678 1679 /* 1680 * rnd_sources_locked() 1681 * 1682 * True if we hold the list of rndsources locked, for diagnostic 1683 * assertions. 1684 */ 1685 static bool __diagused 1686 rnd_sources_locked(void) 1687 { 1688 1689 return E->sourcelock == curlwp; 1690 } 1691 1692 /* 1693 * entropy_request(nbytes) 1694 * 1695 * Request nbytes bytes of entropy from all sources in the system. 1696 * OK if we overdo it. Caller must hold the global entropy lock; 1697 * will release and re-acquire it. 1698 */ 1699 static void 1700 entropy_request(size_t nbytes) 1701 { 1702 struct krndsource *rs; 1703 1704 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1705 1706 /* 1707 * If there is a request in progress, let it proceed. 1708 * Otherwise, note that a request is in progress to avoid 1709 * reentry and to block rnd_detach_source until we're done. 
1710 */ 1711 if (!rnd_trylock_sources()) 1712 return; 1713 entropy_request_evcnt.ev_count++; 1714 1715 /* Clamp to the maximum reasonable request. */ 1716 nbytes = MIN(nbytes, ENTROPY_CAPACITY); 1717 1718 /* Walk the list of sources. */ 1719 LIST_FOREACH(rs, &E->sources, list) { 1720 /* Skip sources without callbacks. */ 1721 if (!ISSET(rs->flags, RND_FLAG_HASCB)) 1722 continue; 1723 1724 /* 1725 * Skip sources that are disabled altogether -- we 1726 * would just ignore their samples anyway. 1727 */ 1728 if (ISSET(rs->flags, RND_FLAG_NO_COLLECT)) 1729 continue; 1730 1731 /* Drop the lock while we call the callback. */ 1732 if (E->stage >= ENTROPY_WARM) 1733 mutex_exit(&E->lock); 1734 (*rs->get)(nbytes, rs->getarg); 1735 if (E->stage >= ENTROPY_WARM) 1736 mutex_enter(&E->lock); 1737 } 1738 1739 /* Notify rnd_detach_source that the request is done. */ 1740 rnd_unlock_sources(); 1741 } 1742 1743 /* 1744 * rnd_add_uint32(rs, value) 1745 * 1746 * Enter 32 bits of data from an entropy source into the pool. 1747 * 1748 * If rs is NULL, may not be called from interrupt context. 1749 * 1750 * If rs is non-NULL, may be called from any context. May drop 1751 * data if called from interrupt context. 1752 */ 1753 void 1754 rnd_add_uint32(struct krndsource *rs, uint32_t value) 1755 { 1756 1757 rnd_add_data(rs, &value, sizeof value, 0); 1758 } 1759 1760 void 1761 _rnd_add_uint32(struct krndsource *rs, uint32_t value) 1762 { 1763 1764 rnd_add_data(rs, &value, sizeof value, 0); 1765 } 1766 1767 void 1768 _rnd_add_uint64(struct krndsource *rs, uint64_t value) 1769 { 1770 1771 rnd_add_data(rs, &value, sizeof value, 0); 1772 } 1773 1774 /* 1775 * rnd_add_data(rs, buf, len, entropybits) 1776 * 1777 * Enter data from an entropy source into the pool, with a 1778 * driver's estimate of how much entropy the physical source of 1779 * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 1780 * estimate and treat it as zero. 1781 * 1782 * If rs is NULL, may not be called from interrupt context. 1783 * 1784 * If rs is non-NULL, may be called from any context. May drop 1785 * data if called from interrupt context. 1786 */ 1787 void 1788 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len, 1789 uint32_t entropybits) 1790 { 1791 uint32_t extra; 1792 uint32_t flags; 1793 1794 KASSERTMSG(howmany(entropybits, NBBY) <= len, 1795 "%s: impossible entropy rate:" 1796 " %"PRIu32" bits in %"PRIu32"-byte string", 1797 rs ? rs->name : "(anonymous)", entropybits, len); 1798 1799 /* If there's no rndsource, just enter the data and time now. */ 1800 if (rs == NULL) { 1801 entropy_enter(buf, len, entropybits); 1802 extra = entropy_timer(); 1803 entropy_enter(&extra, sizeof extra, 0); 1804 explicit_memset(&extra, 0, sizeof extra); 1805 return; 1806 } 1807 1808 /* Load a snapshot of the flags. Ioctl may change them under us. */ 1809 flags = atomic_load_relaxed(&rs->flags); 1810 1811 /* 1812 * Skip if: 1813 * - we're not collecting entropy, or 1814 * - the operator doesn't want to collect entropy from this, or 1815 * - neither data nor timings are being collected from this. 1816 */ 1817 if (!atomic_load_relaxed(&entropy_collection) || 1818 ISSET(flags, RND_FLAG_NO_COLLECT) || 1819 !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME)) 1820 return; 1821 1822 /* If asked, ignore the estimate. */ 1823 if (ISSET(flags, RND_FLAG_NO_ESTIMATE)) 1824 entropybits = 0; 1825 1826 /* If we are collecting data, enter them. 
/*
 * add_sat(a, b)
 *
 *	Saturating addition: return a + b, clamped to UINT_MAX on
 *	overflow instead of wrapping around.
 */
static unsigned
add_sat(unsigned a, unsigned b)
{
	unsigned c = a + b;

	return (c < a ? UINT_MAX : c);
}

/*
 * rnd_add_data_1(rs, buf, len, entropybits, flag)
 *
 *	Internal subroutine to call either entropy_enter_intr, if we're
 *	in interrupt context, or entropy_enter if not, and to count the
 *	entropy in an rndsource.
 */
static void
rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits, uint32_t flag)
{
	bool fullyused;

	/*
	 * If we're in interrupt context, use entropy_enter_intr and
	 * take note of whether it consumed the full sample; if not,
	 * use entropy_enter, which always consumes the full sample.
	 */
	if (curlwp && cpu_intr_p()) {
		fullyused = entropy_enter_intr(buf, len, entropybits);
	} else {
		entropy_enter(buf, len, entropybits);
		fullyused = true;
	}

	/*
	 * If we used the full sample, note how many bits were
	 * contributed from this source.
	 */
	if (fullyused) {
		if (E->stage < ENTROPY_HOT) {
			if (E->stage >= ENTROPY_WARM)
				mutex_enter(&E->lock);
			rs->total = add_sat(rs->total, entropybits);
			switch (flag) {
			case RND_FLAG_COLLECT_TIME:
				rs->time_delta.insamples =
				    add_sat(rs->time_delta.insamples, 1);
				break;
			case RND_FLAG_COLLECT_VALUE:
				rs->value_delta.insamples =
				    add_sat(rs->value_delta.insamples, 1);
				break;
			}
			if (E->stage >= ENTROPY_WARM)
				mutex_exit(&E->lock);
		} else {
			struct rndsource_cpu *rc = percpu_getref(rs->state);

			atomic_store_relaxed(&rc->rc_entropybits,
			    add_sat(rc->rc_entropybits, entropybits));
			switch (flag) {
			case RND_FLAG_COLLECT_TIME:
				atomic_store_relaxed(&rc->rc_timesamples,
				    add_sat(rc->rc_timesamples, 1));
				break;
			case RND_FLAG_COLLECT_VALUE:
				atomic_store_relaxed(&rc->rc_datasamples,
				    add_sat(rc->rc_datasamples, 1));
				break;
			}
			percpu_putref(rs->state);
		}
	}
}

/*
 * rnd_add_data_sync(rs, buf, len, entropybits)
 *
 *	Same as rnd_add_data.  Originally used in rndsource callbacks,
 *	to break an unnecessary cycle; no longer really needed.
 */
void
rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{

	rnd_add_data(rs, buf, len, entropybits);
}

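/*
 * A sketch of an rndsource "get" callback, for illustration only: the
 * hook invoked by entropy_request above via (*rs->get)(nbytes,
 * rs->getarg).  The names mydrv_get, mydrv_softc, and mydrv_read_noise
 * are hypothetical; such a callback would typically be registered with
 * rndsource_setcb(9) before the source is attached.  The example
 * claims a conservative one bit of entropy per byte.
 *
 *	static void
 *	mydrv_get(size_t nbytes, void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *		uint8_t buf[32];
 *
 *		while (nbytes) {
 *			size_t n = MIN(nbytes, sizeof buf);
 *
 *			mydrv_read_noise(sc, buf, n);
 *			rnd_add_data_sync(&sc->sc_rnd, buf, n, n);
 *			nbytes -= n;
 *		}
 *		explicit_memset(buf, 0, sizeof buf);
 *	}
 */
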
/*
 * rndsource_entropybits(rs)
 *
 *	Return approximately the number of bits of entropy that have
 *	been contributed via rs so far.  Approximate if other CPUs may
 *	be calling rnd_add_data concurrently.
 */
static unsigned
rndsource_entropybits(struct krndsource *rs)
{
	unsigned nbits = rs->total;

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());
	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
	return nbits;
}

/*
 * rndsource_entropybits_cpu(ptr, cookie, ci)
 *
 *	percpu_foreach callback for rndsource_entropybits: add this
 *	CPU's contribution to the running total, saturating at
 *	UINT_MAX.
 */
static void
rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	unsigned *nbitsp = cookie;
	unsigned cpu_nbits;

	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
}

/*
 * rndsource_to_user(rs, urs)
 *
 *	Copy a description of rs out to urs for userland.
 */
static void
rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urs, 0, sizeof(*urs));

	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
	strlcpy(urs->name, rs->name, sizeof(urs->name));
	urs->total = rndsource_entropybits(rs);
	urs->type = rs->type;
	urs->flags = atomic_load_relaxed(&rs->flags);
}

/*
 * rndsource_to_user_est(rs, urse)
 *
 *	Copy a description of rs and estimation statistics out to urse
 *	for userland.
 */
static void
rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urse, 0, sizeof(*urse));

	/* Copy out the rndsource description.  */
	rndsource_to_user(rs, &urse->rt);

	/* Gather the statistics.  */
	urse->dt_samples = rs->time_delta.insamples;
	urse->dt_total = 0;
	urse->dv_samples = rs->value_delta.insamples;
	urse->dv_total = urse->rt.total;
	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
}

/*
 * rndsource_to_user_est_cpu(ptr, cookie, ci)
 *
 *	percpu_foreach callback for rndsource_to_user_est: add this
 *	CPU's sample counts to the estimation statistics.
 */
static void
rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	rndsource_est_t *urse = cookie;

	urse->dt_samples = add_sat(urse->dt_samples,
	    atomic_load_relaxed(&rc->rc_timesamples));
	urse->dv_samples = add_sat(urse->dv_samples,
	    atomic_load_relaxed(&rc->rc_datasamples));
}

/*
 * entropy_reset_xc(arg1, arg2)
 *
 *	Reset the current CPU's pending entropy to zero.
 */
static void
entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
{
	uint32_t extra = entropy_timer();
	struct entropy_cpu *ec;
	int s;

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Zero the pending count and enter a cycle count for fun.  */
	ec->ec_pending = 0;
	entpool_enter(ec->ec_pool, &extra, sizeof extra);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}

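/*
 * For illustration, a minimal userland query of the entropy count via
 * the RNDGETENTCNT ioctl handled by entropy_ioctl below, assuming the
 * rnd pseudo-device node (e.g. /dev/urandom) dispatches to it:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/rndio.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	uint32_t bits;
 *	int fd = open("/dev/urandom", O_RDONLY);
 *
 *	if (fd != -1 && ioctl(fd, RNDGETENTCNT, &bits) == 0)
 *		printf("%u bits of entropy in the pool\n", bits);
 *	if (fd != -1)
 *		close(fd);
 */
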
/*
 * entropy_ioctl(cmd, data)
 *
 *	Handle various /dev/random ioctl queries.
 */
int
entropy_ioctl(unsigned long cmd, void *data)
{
	struct krndsource *rs;
	bool privileged = false;
	int error;

	KASSERT(E->stage >= ENTROPY_WARM);

	/* Verify user's authorization to perform the ioctl.  */
	switch (cmd) {
	case RNDGETENTCNT:
	case RNDGETPOOLSTAT:
	case RNDGETSRCNUM:
	case RNDGETSRCNAME:
	case RNDGETESTNUM:
	case RNDGETESTNAME:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDCTL:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDADDDATA:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
		/* Ascertain whether the user's inputs should be counted.  */
		if (kauth_authorize_device(curlwp->l_cred,
			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
			NULL, NULL, NULL, NULL) == 0)
			privileged = true;
		break;
	default: {
		/*
		 * XXX Hack to avoid changing module ABI so this can be
		 * pulled up.  Later, we can just remove the argument.
		 */
		static const struct fileops fops = {
			.fo_ioctl = rnd_system_ioctl,
		};
		struct file f = {
			.f_ops = &fops,
		};
		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
		    enosys(), error);
#if defined(_LP64)
		if (error == ENOSYS)
			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
			    enosys(), error);
#endif
		if (error == ENOSYS)
			error = ENOTTY;
		break;
	}
	}

	/* If anything went wrong with authorization, stop here.  */
	if (error)
		return error;

	/* Dispatch on the command.  */
	switch (cmd) {
	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
		uint32_t *countp = data;

		mutex_enter(&E->lock);
		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
		mutex_exit(&E->lock);

		break;
	}
	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
		rndpoolstat_t *pstat = data;

		mutex_enter(&E->lock);

		/* parameters */
		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */

		/* state */
		pstat->added = 0; /* XXX total entropy_enter count */
		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
		pstat->removed = 0; /* XXX total entropy_extract count */
		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
		pstat->generated = 0; /* XXX bits of data...fabricated? */

		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
		rndstat_t *stat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (stat->count == 0)
			break;
		if (stat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == stat->start)
				break;
		}
		while (i < stat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &stat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= stat->count);
		stat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
		rndstat_est_t *estat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (estat->count == 0)
			break;
		if (estat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == estat->start)
				break;
		}
		while (i < estat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &estat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= estat->count);
		estat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
		rndstat_name_t *nstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, nstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &nstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
		rndstat_est_name_t *enstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, enstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &enstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDCTL: {	/* Modify entropy source flags.  */
		rndctl_t *rndctl = data;
		const size_t n = sizeof(rs->name);
		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
		uint32_t flags;
		bool reset = false, request = false;

		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));

		/* Whitelist the flags that the user can change.  */
		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;

		/*
		 * For each matching rndsource, either by type if
		 * specified or by name if not, set the masked flags.
		 */
		mutex_enter(&E->lock);
		LIST_FOREACH(rs, &E->sources, list) {
			if (rndctl->type != 0xff) {
				if (rs->type != rndctl->type)
					continue;
			} else {
				if (strncmp(rs->name, rndctl->name, n) != 0)
					continue;
			}
			flags = rs->flags & ~rndctl->mask;
			flags |= rndctl->flags & rndctl->mask;
			if ((rs->flags & resetflags) == 0 &&
			    (flags & resetflags) != 0)
				reset = true;
			if ((rs->flags ^ flags) & resetflags)
				request = true;
			atomic_store_relaxed(&rs->flags, flags);
		}
		mutex_exit(&E->lock);

		/*
		 * If we disabled estimation or collection, nix all the
		 * pending entropy and set needed to the maximum.
		 */
		if (reset) {
			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
			mutex_enter(&E->lock);
			E->pending = 0;
			atomic_store_relaxed(&E->needed,
			    ENTROPY_CAPACITY*NBBY);
			mutex_exit(&E->lock);
		}

		/*
		 * If we changed any of the estimation or collection
		 * flags, request new samples from everyone -- either
		 * to make up for what we just lost, or to get new
		 * samples from what we just added.
		 */
		if (request) {
			mutex_enter(&E->lock);
			entropy_request(ENTROPY_CAPACITY);
			mutex_exit(&E->lock);
		}
		break;
	}
	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
		rnddata_t *rdata = data;
		unsigned entropybits = 0;

		if (!atomic_load_relaxed(&entropy_collection))
			break;	/* thanks but no thanks */
		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
			return EINVAL;

		/*
		 * This ioctl serves as the userland alternative to a
		 * bootloader-provided seed -- typically furnished by
		 * /etc/rc.d/random_seed.  We accept the user's entropy
		 * claim only if
		 *
		 * (a) the user is privileged, and
		 * (b) we have not entered a bootloader seed.
		 *
		 * under the assumption that the user may use this to
		 * load a seed from disk that we have already loaded
		 * from the bootloader, so we don't double-count it.
		 */
		if (privileged && rdata->entropy && rdata->len) {
			mutex_enter(&E->lock);
			if (!E->seeded) {
				entropybits = MIN(rdata->entropy,
				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
				E->seeded = true;
			}
			mutex_exit(&E->lock);
		}

		/* Enter the data and consolidate entropy.  */
		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
		    entropybits);
		entropy_consolidate();
		break;
	}
	default:
		error = ENOTTY;
	}

	/* Return any error that may have come up.  */
	return error;
}

/* Legacy entry points */

void
rnd_seed(void *seed, size_t len)
{

	if (len != sizeof(rndsave_t)) {
		printf("entropy: invalid seed length: %zu,"
		    " expected sizeof(rndsave_t) = %zu\n",
		    len, sizeof(rndsave_t));
		return;
	}
	entropy_seed(seed);
}

void
rnd_init(void)
{

	entropy_init();
}

void
rnd_init_softint(void)
{

	entropy_init_late();
}

int
rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
{

	return entropy_ioctl(cmd, data);
}
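
/*
 * For illustration, the life cycle of a hypothetical driver-supplied
 * entropy source, tying the pieces above together.  The mydrv_* and
 * sc_* names are assumptions; rnd_attach_source and rndsource_setcb
 * are part of the rndsource(9) interface declared in <sys/rndsource.h>,
 * and the choice of type and flags depends on the physical source.
 *
 *	static void
 *	mydrv_attach(struct mydrv_softc *sc)
 *	{
 *
 *		rndsource_setcb(&sc->sc_rnd, mydrv_get, sc);
 *		rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
 *		    RND_TYPE_RNG, RND_FLAG_DEFAULT|RND_FLAG_HASCB);
 *	}
 *
 *	static void
 *	mydrv_detach(struct mydrv_softc *sc)
 *	{
 *
 *		rnd_detach_source(&sc->sc_rnd);
 *	}
 *
 * Samples are fed in with rnd_add_data or rnd_add_uint32, and the
 * mydrv_get callback answers entropy_request as sketched earlier.
 */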