/*	$NetBSD: kern_entropy.c,v 1.22 2020/05/12 20:50:17 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Entropy subsystem
 *
 *	* Each CPU maintains a per-CPU entropy pool so that gathering
 *	  entropy requires no interprocessor synchronization, except
 *	  early at boot when we may be scrambling to gather entropy as
 *	  soon as possible.
 *
 *	  - entropy_enter gathers entropy and never drops it on the
 *	    floor, at the cost of sometimes having to do cryptography.
 *
 *	  - entropy_enter_intr gathers entropy or drops it on the
 *	    floor, with low latency.  Work to stir the pool or kick the
 *	    housekeeping thread is scheduled in soft interrupts.
 *
 *	* entropy_enter immediately enters into the global pool if it
 *	  can transition to full entropy in one swell foop.  Otherwise,
 *	  it defers to a housekeeping thread that consolidates entropy,
 *	  but only when the CPUs collectively have full entropy, in
 *	  order to mitigate iterative-guessing attacks.
 *
 *	* The entropy housekeeping thread continues to consolidate
 *	  entropy even after we think we have full entropy, in case we
 *	  are wrong, but is limited to one discretionary consolidation
 *	  per minute, and only when new entropy is actually coming in,
 *	  to limit performance impact.
 *
 *	* The entropy epoch is the number that changes when we
 *	  transition from partial entropy to full entropy, so that
 *	  users can easily determine when to reseed.  This also
 *	  facilitates an operator explicitly causing everything to
 *	  reseed by sysctl -w kern.entropy.consolidate=1.
 *
 *	* No entropy estimation based on the sample values, which is a
 *	  contradiction in terms and a potential source of side
 *	  channels.  It is the responsibility of the driver author to
 *	  study how predictable the physical source of input can ever
 *	  be, and to furnish a lower bound on the amount of entropy it
 *	  has.
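 *	  (Drivers convey that lower bound as the entropybits argument
 *	  to rnd_add_data, below.)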
 *
 *	* Entropy depletion is available for testing (or if you're into
 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
 *	  the logic to support it is small, to minimize chance of bugs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.22 2020/05/12 20:50:17 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/compat_stub.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/intr.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module_hook.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/rnd.h>		/* legacy kernel API */
#include <sys/rndio.h>		/* userland ioctl interface */
#include <sys/rndsource.h>	/* kernel rndsource driver API */
#include <sys/select.h>
#include <sys/selinfo.h>
#include <sys/sha1.h>		/* for boot seed checksum */
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/xcall.h>

#include <lib/libkern/entpool.h>

#include <machine/limits.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

/*
 * struct entropy_cpu
 *
 *	Per-CPU entropy state.  The pool is allocated separately
 *	because percpu(9) sometimes moves per-CPU objects around
 *	without zeroing them, which would lead to unwanted copies of
 *	sensitive secrets.  The evcnt is allocated separately because
 *	evcnt(9) assumes it stays put in memory.
 */
struct entropy_cpu {
	struct evcnt	*ec_softint_evcnt;
	struct entpool	*ec_pool;
	unsigned	ec_pending;
	bool		ec_locked;
};

/*
 * struct rndsource_cpu
 *
 *	Per-CPU rndsource state.
 */
struct rndsource_cpu {
	unsigned	rc_nbits; /* bits of entropy added */
};

/*
 * entropy_global (a.k.a. E for short in this file)
 *
 *	Global entropy state.  Writes protected by the global lock.
 *	Some fields, marked (A), can be read outside the lock, and are
 *	maintained with atomic_load/store_relaxed.
 */
struct {
	kmutex_t	lock;		/* covers all global state */
	struct entpool	pool;		/* global pool for extraction */
	unsigned	needed;		/* (A) needed globally */
	unsigned	pending;	/* (A) pending in per-CPU pools */
	unsigned	timestamp;	/* (A) time of last consolidation */
	unsigned	epoch;		/* (A) changes when needed -> 0 */
	kcondvar_t	cv;		/* notifies state changes */
	struct selinfo	selq;		/* notifies needed -> 0 */
	struct lwp	*sourcelock;	/* lock on list of sources */
	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
	enum entropy_stage {
		ENTROPY_COLD = 0,	/* single-threaded */
		ENTROPY_WARM,		/* multi-threaded at boot before CPUs */
		ENTROPY_HOT,		/* multi-threaded multi-CPU */
	}		stage;
	bool		consolidate;	/* kick thread to consolidate */
	bool		seed_rndsource;	/* true if seed source is attached */
	bool		seeded;		/* true if seed file already loaded */
} entropy_global __cacheline_aligned = {
	/* Fields that must be initialized when the kernel is loaded. */
	.needed = ENTROPY_CAPACITY*NBBY,
	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
	.stage = ENTROPY_COLD,
};

#define	E	(&entropy_global)	/* declutter */

/* Read-mostly globals */
static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
static void		*entropy_sih __read_mostly; /* softint handler */
static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */

int rnd_initial_entropy __read_mostly; /* XXX legacy */

static struct krndsource seed_rndsource __read_mostly;

/*
 * Event counters
 *
 *	Must be careful with adding these because they can serve as
 *	side channels.
 */
static struct evcnt entropy_discretionary_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
static struct evcnt entropy_immediate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
static struct evcnt entropy_partial_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
static struct evcnt entropy_consolidate_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
static struct evcnt entropy_extract_intr_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr");
EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt);
static struct evcnt entropy_extract_fail_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
static struct evcnt entropy_request_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
EVCNT_ATTACH_STATIC(entropy_request_evcnt);
static struct evcnt entropy_deplete_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
static struct evcnt entropy_notify_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
EVCNT_ATTACH_STATIC(entropy_notify_evcnt);

/* Sysctl knobs */
static bool	entropy_collection = 1;
static bool	entropy_depletion = 0; /* Silly!
*/ 228 229 static const struct sysctlnode *entropy_sysctlroot; 230 static struct sysctllog *entropy_sysctllog; 231 232 /* Forward declarations */ 233 static void entropy_init_cpu(void *, void *, struct cpu_info *); 234 static void entropy_fini_cpu(void *, void *, struct cpu_info *); 235 static void entropy_account_cpu(struct entropy_cpu *); 236 static void entropy_enter(const void *, size_t, unsigned); 237 static bool entropy_enter_intr(const void *, size_t, unsigned); 238 static void entropy_softintr(void *); 239 static void entropy_thread(void *); 240 static uint32_t entropy_pending(void); 241 static void entropy_pending_cpu(void *, void *, struct cpu_info *); 242 static void entropy_do_consolidate(void); 243 static void entropy_consolidate_xc(void *, void *); 244 static void entropy_notify(void); 245 static int sysctl_entropy_consolidate(SYSCTLFN_ARGS); 246 static int sysctl_entropy_gather(SYSCTLFN_ARGS); 247 static void filt_entropy_read_detach(struct knote *); 248 static int filt_entropy_read_event(struct knote *, long); 249 static void entropy_request(size_t); 250 static void rnd_add_data_1(struct krndsource *, const void *, uint32_t, 251 uint32_t); 252 static unsigned rndsource_entropybits(struct krndsource *); 253 static void rndsource_entropybits_cpu(void *, void *, struct cpu_info *); 254 static void rndsource_to_user(struct krndsource *, rndsource_t *); 255 static void rndsource_to_user_est(struct krndsource *, rndsource_est_t *); 256 257 /* 258 * entropy_timer() 259 * 260 * Cycle counter, time counter, or anything that changes a wee bit 261 * unpredictably. 262 */ 263 static inline uint32_t 264 entropy_timer(void) 265 { 266 struct bintime bt; 267 uint32_t v; 268 269 /* If we have a CPU cycle counter, use the low 32 bits. */ 270 #ifdef __HAVE_CPU_COUNTER 271 if (__predict_true(cpu_hascounter())) 272 return cpu_counter32(); 273 #endif /* __HAVE_CPU_COUNTER */ 274 275 /* If we're cold, tough. Can't binuptime while cold. */ 276 if (__predict_false(cold)) 277 return 0; 278 279 /* Fold the 128 bits of binuptime into 32 bits. */ 280 binuptime(&bt); 281 v = bt.frac; 282 v ^= bt.frac >> 32; 283 v ^= bt.sec; 284 v ^= bt.sec >> 32; 285 return v; 286 } 287 288 static void 289 attach_seed_rndsource(void) 290 { 291 292 /* 293 * First called no later than entropy_init, while we are still 294 * single-threaded, so no need for RUN_ONCE. 295 */ 296 if (E->stage >= ENTROPY_WARM || E->seed_rndsource) 297 return; 298 rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN, 299 RND_FLAG_COLLECT_VALUE); 300 E->seed_rndsource = true; 301 } 302 303 /* 304 * entropy_init() 305 * 306 * Initialize the entropy subsystem. Panic on failure. 307 * 308 * Requires percpu(9) and sysctl(9) to be initialized. 309 */ 310 static void 311 entropy_init(void) 312 { 313 uint32_t extra[2]; 314 struct krndsource *rs; 315 unsigned i = 0; 316 317 KASSERT(E->stage == ENTROPY_COLD); 318 319 /* Grab some cycle counts early at boot. */ 320 extra[i++] = entropy_timer(); 321 322 /* Run the entropy pool cryptography self-test. */ 323 if (entpool_selftest() == -1) 324 panic("entropy pool crypto self-test failed"); 325 326 /* Create the sysctl directory. */ 327 sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot, 328 CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy", 329 SYSCTL_DESCR("Entropy (random number sources) options"), 330 NULL, 0, NULL, 0, 331 CTL_KERN, CTL_CREATE, CTL_EOL); 332 333 /* Create the sysctl knobs. */ 334 /* XXX These shouldn't be writable at securelevel>0. 
*/ 335 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 336 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection", 337 SYSCTL_DESCR("Automatically collect entropy from hardware"), 338 NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL); 339 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 340 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion", 341 SYSCTL_DESCR("`Deplete' entropy pool when observed"), 342 NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL); 343 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 344 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate", 345 SYSCTL_DESCR("Trigger entropy consolidation now"), 346 sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL); 347 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 348 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather", 349 SYSCTL_DESCR("Trigger entropy gathering from sources now"), 350 sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL); 351 /* XXX These should maybe not be readable at securelevel>0. */ 352 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 353 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 354 "needed", SYSCTL_DESCR("Systemwide entropy deficit"), 355 NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL); 356 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 357 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 358 "pending", SYSCTL_DESCR("Entropy pending on CPUs"), 359 NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL); 360 sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, 361 CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, 362 "epoch", SYSCTL_DESCR("Entropy epoch"), 363 NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL); 364 365 /* Initialize the global state for multithreaded operation. */ 366 mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM); 367 cv_init(&E->cv, "entropy"); 368 selinit(&E->selq); 369 370 /* Make sure the seed source is attached. */ 371 attach_seed_rndsource(); 372 373 /* Note if the bootloader didn't provide a seed. */ 374 if (!E->seeded) 375 printf("entropy: no seed from bootloader\n"); 376 377 /* Allocate the per-CPU records for all early entropy sources. */ 378 LIST_FOREACH(rs, &E->sources, list) 379 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 380 381 /* Enter the boot cycle count to get started. */ 382 extra[i++] = entropy_timer(); 383 KASSERT(i == __arraycount(extra)); 384 entropy_enter(extra, sizeof extra, 0); 385 explicit_memset(extra, 0, sizeof extra); 386 387 /* We are now ready for multi-threaded operation. */ 388 E->stage = ENTROPY_WARM; 389 } 390 391 /* 392 * entropy_init_late() 393 * 394 * Late initialization. Panic on failure. 395 * 396 * Requires CPUs to have been detected and LWPs to have started. 397 */ 398 static void 399 entropy_init_late(void) 400 { 401 int error; 402 403 KASSERT(E->stage == ENTROPY_WARM); 404 405 /* Allocate and initialize the per-CPU state. */ 406 entropy_percpu = percpu_create(sizeof(struct entropy_cpu), 407 entropy_init_cpu, entropy_fini_cpu, NULL); 408 409 /* 410 * Establish the softint at the highest softint priority level. 411 * Must happen after CPU detection. 412 */ 413 entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, 414 &entropy_softintr, NULL); 415 if (entropy_sih == NULL) 416 panic("unable to establish entropy softint"); 417 418 /* 419 * Create the entropy housekeeping thread. Must happen after 420 * lwpinit. 
421 */ 422 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL, 423 entropy_thread, NULL, &entropy_lwp, "entbutler"); 424 if (error) 425 panic("unable to create entropy housekeeping thread: %d", 426 error); 427 428 /* 429 * Wait until the per-CPU initialization has hit all CPUs 430 * before proceeding to mark the entropy system hot. 431 */ 432 xc_barrier(XC_HIGHPRI); 433 E->stage = ENTROPY_HOT; 434 } 435 436 /* 437 * entropy_init_cpu(ptr, cookie, ci) 438 * 439 * percpu(9) constructor for per-CPU entropy pool. 440 */ 441 static void 442 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci) 443 { 444 struct entropy_cpu *ec = ptr; 445 446 ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt), 447 KM_SLEEP); 448 ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP); 449 ec->ec_pending = 0; 450 ec->ec_locked = false; 451 452 evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL, 453 ci->ci_cpuname, "entropy softint"); 454 } 455 456 /* 457 * entropy_fini_cpu(ptr, cookie, ci) 458 * 459 * percpu(9) destructor for per-CPU entropy pool. 460 */ 461 static void 462 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci) 463 { 464 struct entropy_cpu *ec = ptr; 465 466 /* 467 * Zero any lingering data. Disclosure of the per-CPU pool 468 * shouldn't retroactively affect the security of any keys 469 * generated, because entpool(9) erases whatever we have just 470 * drawn out of any pool, but better safe than sorry. 471 */ 472 explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool)); 473 474 evcnt_detach(ec->ec_softint_evcnt); 475 476 kmem_free(ec->ec_pool, sizeof(*ec->ec_pool)); 477 kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt)); 478 } 479 480 /* 481 * entropy_seed(seed) 482 * 483 * Seed the entropy pool with seed. Meant to be called as early 484 * as possible by the bootloader; may be called before or after 485 * entropy_init. Must be called before system reaches userland. 486 * Must be called in thread or soft interrupt context, not in hard 487 * interrupt context. Must be called at most once. 488 * 489 * Overwrites the seed in place. Caller may then free the memory. 490 */ 491 static void 492 entropy_seed(rndsave_t *seed) 493 { 494 SHA1_CTX ctx; 495 uint8_t digest[SHA1_DIGEST_LENGTH]; 496 bool seeded; 497 498 /* 499 * Verify the checksum. If the checksum fails, take the data 500 * but ignore the entropy estimate -- the file may have been 501 * incompletely written with garbage, which is harmless to add 502 * but may not be as unpredictable as alleged. 503 */ 504 SHA1Init(&ctx); 505 SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy)); 506 SHA1Update(&ctx, seed->data, sizeof(seed->data)); 507 SHA1Final(digest, &ctx); 508 CTASSERT(sizeof(seed->digest) == sizeof(digest)); 509 if (!consttime_memequal(digest, seed->digest, sizeof(digest))) { 510 printf("entropy: invalid seed checksum\n"); 511 seed->entropy = 0; 512 } 513 explicit_memset(&ctx, 0, sizeof ctx); 514 explicit_memset(digest, 0, sizeof digest); 515 516 /* 517 * If the entropy is insensibly large, try byte-swapping. 518 * Otherwise assume the file is corrupted and act as though it 519 * has zero entropy. 520 */ 521 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) { 522 seed->entropy = bswap32(seed->entropy); 523 if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) 524 seed->entropy = 0; 525 } 526 527 /* Make sure the seed source is attached. */ 528 attach_seed_rndsource(); 529 530 /* Test and set E->seeded. 
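	 * (The global lock exists only once we are warm; before that the
	 * system is still single-threaded, so the plain accesses below
	 * are safe without it.)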
*/ 531 if (E->stage >= ENTROPY_WARM) 532 mutex_enter(&E->lock); 533 seeded = E->seeded; 534 E->seeded = (seed->entropy > 0); 535 if (E->stage >= ENTROPY_WARM) 536 mutex_exit(&E->lock); 537 538 /* 539 * If we've been seeded, may be re-entering the same seed 540 * (e.g., bootloader vs module init, or something). No harm in 541 * entering it twice, but it contributes no additional entropy. 542 */ 543 if (seeded) { 544 printf("entropy: double-seeded by bootloader\n"); 545 seed->entropy = 0; 546 } else { 547 printf("entropy: entering seed from bootloader" 548 " with %u bits of entropy\n", (unsigned)seed->entropy); 549 } 550 551 /* Enter it into the pool and promptly zero it. */ 552 rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data), 553 seed->entropy); 554 explicit_memset(seed, 0, sizeof(*seed)); 555 } 556 557 /* 558 * entropy_bootrequest() 559 * 560 * Request entropy from all sources at boot, once config is 561 * complete and interrupts are running. 562 */ 563 void 564 entropy_bootrequest(void) 565 { 566 567 KASSERT(E->stage >= ENTROPY_WARM); 568 569 /* 570 * Request enough to satisfy the maximum entropy shortage. 571 * This is harmless overkill if the bootloader provided a seed. 572 */ 573 mutex_enter(&E->lock); 574 entropy_request(ENTROPY_CAPACITY); 575 mutex_exit(&E->lock); 576 } 577 578 /* 579 * entropy_epoch() 580 * 581 * Returns the current entropy epoch. If this changes, you should 582 * reseed. If -1, means system entropy has not yet reached full 583 * entropy or been explicitly consolidated; never reverts back to 584 * -1. Never zero, so you can always use zero as an uninitialized 585 * sentinel value meaning `reseed ASAP'. 586 * 587 * Usage model: 588 * 589 * struct foo { 590 * struct crypto_prng prng; 591 * unsigned epoch; 592 * } *foo; 593 * 594 * unsigned epoch = entropy_epoch(); 595 * if (__predict_false(epoch != foo->epoch)) { 596 * uint8_t seed[32]; 597 * if (entropy_extract(seed, sizeof seed, 0) != 0) 598 * warn("no entropy"); 599 * crypto_prng_reseed(&foo->prng, seed, sizeof seed); 600 * foo->epoch = epoch; 601 * } 602 */ 603 unsigned 604 entropy_epoch(void) 605 { 606 607 /* 608 * Unsigned int, so no need for seqlock for an atomic read, but 609 * make sure we read it afresh each time. 610 */ 611 return atomic_load_relaxed(&E->epoch); 612 } 613 614 /* 615 * entropy_account_cpu(ec) 616 * 617 * Consider whether to consolidate entropy into the global pool 618 * after we just added some into the current CPU's pending pool. 619 * 620 * - If this CPU can provide enough entropy now, do so. 621 * 622 * - If this and whatever else is available on other CPUs can 623 * provide enough entropy, kick the consolidation thread. 624 * 625 * - Otherwise, do as little as possible, except maybe consolidate 626 * entropy at most once a minute. 627 * 628 * Caller must be bound to a CPU and therefore have exclusive 629 * access to ec. Will acquire and release the global lock. 630 */ 631 static void 632 entropy_account_cpu(struct entropy_cpu *ec) 633 { 634 unsigned diff; 635 636 KASSERT(E->stage == ENTROPY_HOT); 637 638 /* 639 * If there's no entropy needed, and entropy has been 640 * consolidated in the last minute, do nothing. 641 */ 642 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 643 __predict_true(!atomic_load_relaxed(&entropy_depletion)) && 644 __predict_true((time_uptime - E->timestamp) <= 60)) 645 return; 646 647 /* If there's nothing pending, stop here. */ 648 if (ec->ec_pending == 0) 649 return; 650 651 /* Consider consolidation, under the lock. 
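	 * (Either this CPU's pending count alone can take us to full
	 * entropy, in which case we feed the global pool right away, or
	 * we merely account the pending bits and leave consolidation to
	 * the housekeeping thread.)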
*/ 652 mutex_enter(&E->lock); 653 if (E->needed != 0 && E->needed <= ec->ec_pending) { 654 /* 655 * If we have not yet attained full entropy but we can 656 * now, do so. This way we disseminate entropy 657 * promptly when it becomes available early at boot; 658 * otherwise we leave it to the entropy consolidation 659 * thread, which is rate-limited to mitigate side 660 * channels and abuse. 661 */ 662 uint8_t buf[ENTPOOL_CAPACITY]; 663 664 /* Transfer from the local pool to the global pool. */ 665 entpool_extract(ec->ec_pool, buf, sizeof buf); 666 entpool_enter(&E->pool, buf, sizeof buf); 667 atomic_store_relaxed(&ec->ec_pending, 0); 668 atomic_store_relaxed(&E->needed, 0); 669 670 /* Notify waiters that we now have full entropy. */ 671 entropy_notify(); 672 entropy_immediate_evcnt.ev_count++; 673 } else { 674 /* Record how much we can add to the global pool. */ 675 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending); 676 E->pending += diff; 677 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff); 678 679 /* 680 * This should have made a difference unless we were 681 * already saturated. 682 */ 683 KASSERT(diff || E->pending == ENTROPY_CAPACITY*NBBY); 684 KASSERT(E->pending); 685 686 if (E->needed <= E->pending) { 687 /* 688 * Enough entropy between all the per-CPU 689 * pools. Wake up the housekeeping thread. 690 * 691 * If we don't need any entropy, this doesn't 692 * mean much, but it is the only time we ever 693 * gather additional entropy in case the 694 * accounting has been overly optimistic. This 695 * happens at most once a minute, so there's 696 * negligible performance cost. 697 */ 698 E->consolidate = true; 699 cv_broadcast(&E->cv); 700 if (E->needed == 0) 701 entropy_discretionary_evcnt.ev_count++; 702 } else { 703 /* Can't get full entropy. Keep gathering. */ 704 entropy_partial_evcnt.ev_count++; 705 } 706 } 707 mutex_exit(&E->lock); 708 } 709 710 /* 711 * entropy_enter_early(buf, len, nbits) 712 * 713 * Do entropy bookkeeping globally, before we have established 714 * per-CPU pools. Enter directly into the global pool in the hope 715 * that we enter enough before the first entropy_extract to thwart 716 * iterative-guessing attacks; entropy_extract will warn if not. 717 */ 718 static void 719 entropy_enter_early(const void *buf, size_t len, unsigned nbits) 720 { 721 bool notify = false; 722 723 if (E->stage >= ENTROPY_WARM) 724 mutex_enter(&E->lock); 725 726 /* Enter it into the pool. */ 727 entpool_enter(&E->pool, buf, len); 728 729 /* 730 * Decide whether to notify reseed -- we will do so if either: 731 * (a) we transition from partial entropy to full entropy, or 732 * (b) we get a batch of full entropy all at once. 733 */ 734 notify |= (E->needed && E->needed <= nbits); 735 notify |= (nbits >= ENTROPY_CAPACITY*NBBY); 736 737 /* Subtract from the needed count and notify if appropriate. */ 738 E->needed -= MIN(E->needed, nbits); 739 if (notify) { 740 entropy_notify(); 741 entropy_immediate_evcnt.ev_count++; 742 } 743 744 if (E->stage >= ENTROPY_WARM) 745 mutex_exit(&E->lock); 746 } 747 748 /* 749 * entropy_enter(buf, len, nbits) 750 * 751 * Enter len bytes of data from buf into the system's entropy 752 * pool, stirring as necessary when the internal buffer fills up. 753 * nbits is a lower bound on the number of bits of entropy in the 754 * process that led to this sample. 
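 *
 *	For example, entropy_init enters its boot-time cycle counts
 *	while claiming no entropy for them:
 *
 *		entropy_enter(extra, sizeof extra, 0);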
755 */ 756 static void 757 entropy_enter(const void *buf, size_t len, unsigned nbits) 758 { 759 struct entropy_cpu *ec; 760 uint32_t pending; 761 int s; 762 763 KASSERTMSG(!cpu_intr_p(), 764 "use entropy_enter_intr from interrupt context"); 765 KASSERTMSG(howmany(nbits, NBBY) <= len, 766 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 767 768 /* If it's too early after boot, just use entropy_enter_early. */ 769 if (__predict_false(E->stage < ENTROPY_HOT)) { 770 entropy_enter_early(buf, len, nbits); 771 return; 772 } 773 774 /* 775 * Acquire the per-CPU state, blocking soft interrupts and 776 * causing hard interrupts to drop samples on the floor. 777 */ 778 ec = percpu_getref(entropy_percpu); 779 s = splsoftserial(); 780 KASSERT(!ec->ec_locked); 781 ec->ec_locked = true; 782 __insn_barrier(); 783 784 /* Enter into the per-CPU pool. */ 785 entpool_enter(ec->ec_pool, buf, len); 786 787 /* Count up what we can add. */ 788 pending = ec->ec_pending; 789 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 790 atomic_store_relaxed(&ec->ec_pending, pending); 791 792 /* Consolidate globally if appropriate based on what we added. */ 793 entropy_account_cpu(ec); 794 795 /* Release the per-CPU state. */ 796 KASSERT(ec->ec_locked); 797 __insn_barrier(); 798 ec->ec_locked = false; 799 splx(s); 800 percpu_putref(entropy_percpu); 801 } 802 803 /* 804 * entropy_enter_intr(buf, len, nbits) 805 * 806 * Enter up to len bytes of data from buf into the system's 807 * entropy pool without stirring. nbits is a lower bound on the 808 * number of bits of entropy in the process that led to this 809 * sample. If the sample could be entered completely, assume 810 * nbits of entropy pending; otherwise assume none, since we don't 811 * know whether some parts of the sample are constant, for 812 * instance. Schedule a softint to stir the entropy pool if 813 * needed. Return true if used fully, false if truncated at all. 814 * 815 * Using this in thread context will work, but you might as well 816 * use entropy_enter in that case. 817 */ 818 static bool 819 entropy_enter_intr(const void *buf, size_t len, unsigned nbits) 820 { 821 struct entropy_cpu *ec; 822 bool fullyused = false; 823 uint32_t pending; 824 825 KASSERTMSG(howmany(nbits, NBBY) <= len, 826 "impossible entropy rate: %u bits in %zu-byte string", nbits, len); 827 828 /* If it's too early after boot, just use entropy_enter_early. */ 829 if (__predict_false(E->stage < ENTROPY_HOT)) { 830 entropy_enter_early(buf, len, nbits); 831 return true; 832 } 833 834 /* 835 * Acquire the per-CPU state. If someone is in the middle of 836 * using it, drop the sample. Otherwise, take the lock so that 837 * higher-priority interrupts will drop their samples. 838 */ 839 ec = percpu_getref(entropy_percpu); 840 if (ec->ec_locked) 841 goto out0; 842 ec->ec_locked = true; 843 __insn_barrier(); 844 845 /* 846 * Enter as much as we can into the per-CPU pool. If it was 847 * truncated, schedule a softint to stir the pool and stop. 848 */ 849 if (!entpool_enter_nostir(ec->ec_pool, buf, len)) { 850 softint_schedule(entropy_sih); 851 goto out1; 852 } 853 fullyused = true; 854 855 /* Count up what we can contribute. */ 856 pending = ec->ec_pending; 857 pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits); 858 atomic_store_relaxed(&ec->ec_pending, pending); 859 860 /* Schedule a softint if we added anything and it matters. 
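	 * (`Matters' means somebody still needs entropy or depletion is
	 * enabled, so the newly pending bits should be credited to the
	 * global accounting promptly by entropy_account_cpu in the
	 * softint, rather than waiting for a later sample.)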
*/ 861 if (__predict_false((atomic_load_relaxed(&E->needed) != 0) || 862 atomic_load_relaxed(&entropy_depletion)) && 863 nbits != 0) 864 softint_schedule(entropy_sih); 865 866 out1: /* Release the per-CPU state. */ 867 KASSERT(ec->ec_locked); 868 __insn_barrier(); 869 ec->ec_locked = false; 870 out0: percpu_putref(entropy_percpu); 871 872 return fullyused; 873 } 874 875 /* 876 * entropy_softintr(cookie) 877 * 878 * Soft interrupt handler for entering entropy. Takes care of 879 * stirring the local CPU's entropy pool if it filled up during 880 * hard interrupts, and promptly crediting entropy from the local 881 * CPU's entropy pool to the global entropy pool if needed. 882 */ 883 static void 884 entropy_softintr(void *cookie) 885 { 886 struct entropy_cpu *ec; 887 888 /* 889 * Acquire the per-CPU state. Other users can lock this only 890 * while soft interrupts are blocked. Cause hard interrupts to 891 * drop samples on the floor. 892 */ 893 ec = percpu_getref(entropy_percpu); 894 KASSERT(!ec->ec_locked); 895 ec->ec_locked = true; 896 __insn_barrier(); 897 898 /* Count statistics. */ 899 ec->ec_softint_evcnt->ev_count++; 900 901 /* Stir the pool if necessary. */ 902 entpool_stir(ec->ec_pool); 903 904 /* Consolidate globally if appropriate based on what we added. */ 905 entropy_account_cpu(ec); 906 907 /* Release the per-CPU state. */ 908 KASSERT(ec->ec_locked); 909 __insn_barrier(); 910 ec->ec_locked = false; 911 percpu_putref(entropy_percpu); 912 } 913 914 /* 915 * entropy_thread(cookie) 916 * 917 * Handle any asynchronous entropy housekeeping. 918 */ 919 static void 920 entropy_thread(void *cookie) 921 { 922 bool consolidate; 923 924 for (;;) { 925 /* 926 * Wait until there's full entropy somewhere among the 927 * CPUs, as confirmed at most once per minute, or 928 * someone wants to consolidate. 929 */ 930 if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) { 931 consolidate = true; 932 } else { 933 mutex_enter(&E->lock); 934 if (!E->consolidate) 935 cv_timedwait(&E->cv, &E->lock, 60*hz); 936 consolidate = E->consolidate; 937 E->consolidate = false; 938 mutex_exit(&E->lock); 939 } 940 941 if (consolidate) { 942 /* Do it. */ 943 entropy_do_consolidate(); 944 945 /* Mitigate abuse. */ 946 kpause("entropy", false, hz, NULL); 947 } 948 } 949 } 950 951 /* 952 * entropy_pending() 953 * 954 * Count up the amount of entropy pending on other CPUs. 955 */ 956 static uint32_t 957 entropy_pending(void) 958 { 959 uint32_t pending = 0; 960 961 percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending); 962 return pending; 963 } 964 965 static void 966 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci) 967 { 968 struct entropy_cpu *ec = ptr; 969 uint32_t *pendingp = cookie; 970 uint32_t cpu_pending; 971 972 cpu_pending = atomic_load_relaxed(&ec->ec_pending); 973 *pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending); 974 } 975 976 /* 977 * entropy_do_consolidate() 978 * 979 * Issue a cross-call to gather entropy on all CPUs and advance 980 * the entropy epoch. 981 */ 982 static void 983 entropy_do_consolidate(void) 984 { 985 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 986 static struct timeval lasttime; /* serialized by E->lock */ 987 struct entpool pool; 988 uint8_t buf[ENTPOOL_CAPACITY]; 989 unsigned diff; 990 uint64_t ticket; 991 992 /* Gather entropy on all CPUs into a temporary pool. 
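	 * (xc_broadcast runs entropy_consolidate_xc on every CPU; each
	 * CPU extracts its local pool and enters the result into this
	 * temporary pool, so no pending entropy is left behind.)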
	 */
	memset(&pool, 0, sizeof pool);
	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
	xc_wait(ticket);

	/* Acquire the lock to notify waiters. */
	mutex_enter(&E->lock);

	/* Count another consolidation. */
	entropy_consolidate_evcnt.ev_count++;

	/* Note when we last consolidated, i.e. now. */
	E->timestamp = time_uptime;

	/* Mix what we gathered into the global pool. */
	entpool_extract(&pool, buf, sizeof buf);
	entpool_enter(&E->pool, buf, sizeof buf);
	explicit_memset(&pool, 0, sizeof pool);

	/* Count the entropy that was gathered. */
	diff = MIN(E->needed, E->pending);
	atomic_store_relaxed(&E->needed, E->needed - diff);
	E->pending -= diff;
	if (__predict_false(E->needed > 0)) {
		if (ratecheck(&lasttime, &interval))
			printf("entropy: WARNING:"
			    " consolidating less than full entropy\n");
	}

	/* Advance the epoch and notify waiters. */
	entropy_notify();

	/* Release the lock. */
	mutex_exit(&E->lock);
}

/*
 * entropy_consolidate_xc(vpool, arg2)
 *
 *	Extract output from the local CPU's input pool and enter it
 *	into a temporary pool passed as vpool.
 */
static void
entropy_consolidate_xc(void *vpool, void *arg2 __unused)
{
	struct entpool *pool = vpool;
	struct entropy_cpu *ec;
	uint8_t buf[ENTPOOL_CAPACITY];
	uint32_t extra[7];
	unsigned i = 0;
	int s;

	/* Grab CPU number and cycle counter to mix extra into the pool. */
	extra[i++] = cpu_number();
	extra[i++] = entropy_timer();

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * discarding entropy in hard interrupts, so that we can
	 * extract from the per-CPU pool.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();
	extra[i++] = entropy_timer();

	/* Extract the data and count it no longer pending. */
	entpool_extract(ec->ec_pool, buf, sizeof buf);
	atomic_store_relaxed(&ec->ec_pending, 0);
	extra[i++] = entropy_timer();

	/* Release the per-CPU state. */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
	extra[i++] = entropy_timer();

	/*
	 * Copy over statistics, and enter the per-CPU extract and the
	 * extra timing into the temporary pool, under the global lock.
	 */
	mutex_enter(&E->lock);
	extra[i++] = entropy_timer();
	entpool_enter(pool, buf, sizeof buf);
	explicit_memset(buf, 0, sizeof buf);
	extra[i++] = entropy_timer();
	KASSERT(i == __arraycount(extra));
	entpool_enter(pool, extra, sizeof extra);
	explicit_memset(extra, 0, sizeof extra);
	mutex_exit(&E->lock);
}

/*
 * entropy_notify()
 *
 *	Caller just contributed entropy to the global pool.  Advance
 *	the entropy epoch and notify waiters.
 *
 *	Caller must hold the global entropy lock.  Except for the
 *	`sysctl -w kern.entropy.consolidate=1` trigger, the caller must
 *	have just transitioned from partial entropy to full
 *	entropy -- E->needed should be zero now.
1098 */ 1099 static void 1100 entropy_notify(void) 1101 { 1102 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1103 static struct timeval lasttime; /* serialized by E->lock */ 1104 unsigned epoch; 1105 1106 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1107 1108 /* 1109 * If this is the first time, print a message to the console 1110 * that we're ready so operators can compare it to the timing 1111 * of other events. 1112 */ 1113 if (__predict_false(!rnd_initial_entropy) && E->needed == 0) { 1114 printf("entropy: ready\n"); 1115 rnd_initial_entropy = 1; 1116 } 1117 1118 /* Set the epoch; roll over from UINTMAX-1 to 1. */ 1119 if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) || 1120 ratecheck(&lasttime, &interval)) { 1121 epoch = E->epoch + 1; 1122 if (epoch == 0 || epoch == (unsigned)-1) 1123 epoch = 1; 1124 atomic_store_relaxed(&E->epoch, epoch); 1125 } 1126 1127 /* Notify waiters. */ 1128 if (E->stage >= ENTROPY_WARM) { 1129 cv_broadcast(&E->cv); 1130 selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT); 1131 } 1132 1133 /* Count another notification. */ 1134 entropy_notify_evcnt.ev_count++; 1135 } 1136 1137 /* 1138 * entropy_consolidate() 1139 * 1140 * Trigger entropy consolidation and wait for it to complete. 1141 * 1142 * This should be used sparingly, not periodically -- requiring 1143 * conscious intervention by the operator or a clear policy 1144 * decision. Otherwise, the kernel will automatically consolidate 1145 * when enough entropy has been gathered into per-CPU pools to 1146 * transition to full entropy. 1147 */ 1148 void 1149 entropy_consolidate(void) 1150 { 1151 uint64_t ticket; 1152 int error; 1153 1154 KASSERT(E->stage == ENTROPY_HOT); 1155 1156 mutex_enter(&E->lock); 1157 ticket = entropy_consolidate_evcnt.ev_count; 1158 E->consolidate = true; 1159 cv_broadcast(&E->cv); 1160 while (ticket == entropy_consolidate_evcnt.ev_count) { 1161 error = cv_wait_sig(&E->cv, &E->lock); 1162 if (error) 1163 break; 1164 } 1165 mutex_exit(&E->lock); 1166 } 1167 1168 /* 1169 * sysctl -w kern.entropy.consolidate=1 1170 * 1171 * Trigger entropy consolidation and wait for it to complete. 1172 * Writable only by superuser. This, writing to /dev/random, and 1173 * ioctl(RNDADDDATA) are the only ways for the system to 1174 * consolidate entropy if the operator knows something the kernel 1175 * doesn't about how unpredictable the pending entropy pools are. 1176 */ 1177 static int 1178 sysctl_entropy_consolidate(SYSCTLFN_ARGS) 1179 { 1180 struct sysctlnode node = *rnode; 1181 int arg; 1182 int error; 1183 1184 KASSERT(E->stage == ENTROPY_HOT); 1185 1186 node.sysctl_data = &arg; 1187 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1188 if (error || newp == NULL) 1189 return error; 1190 if (arg) 1191 entropy_consolidate(); 1192 1193 return error; 1194 } 1195 1196 /* 1197 * sysctl -w kern.entropy.gather=1 1198 * 1199 * Trigger gathering entropy from all on-demand sources, and wait 1200 * for synchronous sources (but not asynchronous sources) to 1201 * complete. Writable only by superuser. 
1202 */ 1203 static int 1204 sysctl_entropy_gather(SYSCTLFN_ARGS) 1205 { 1206 struct sysctlnode node = *rnode; 1207 int arg; 1208 int error; 1209 1210 KASSERT(E->stage == ENTROPY_HOT); 1211 1212 node.sysctl_data = &arg; 1213 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1214 if (error || newp == NULL) 1215 return error; 1216 if (arg) { 1217 mutex_enter(&E->lock); 1218 entropy_request(ENTROPY_CAPACITY); 1219 mutex_exit(&E->lock); 1220 } 1221 1222 return 0; 1223 } 1224 1225 /* 1226 * entropy_extract(buf, len, flags) 1227 * 1228 * Extract len bytes from the global entropy pool into buf. 1229 * 1230 * Flags may have: 1231 * 1232 * ENTROPY_WAIT Wait for entropy if not available yet. 1233 * ENTROPY_SIG Allow interruption by a signal during wait. 1234 * 1235 * Return zero on success, or error on failure: 1236 * 1237 * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. 1238 * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. 1239 * 1240 * If ENTROPY_WAIT is set, allowed only in thread context. If 1241 * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's 1242 * awfully high... Do we really need it in hard interrupts? This 1243 * arises from use of cprng_strong(9).) 1244 */ 1245 int 1246 entropy_extract(void *buf, size_t len, int flags) 1247 { 1248 static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; 1249 static struct timeval lasttime; /* serialized by E->lock */ 1250 int error; 1251 1252 if (ISSET(flags, ENTROPY_WAIT)) { 1253 ASSERT_SLEEPABLE(); 1254 KASSERTMSG(E->stage >= ENTROPY_WARM, 1255 "can't wait for entropy until warm"); 1256 } 1257 1258 /* Acquire the global lock to get at the global pool. */ 1259 if (E->stage >= ENTROPY_WARM) 1260 mutex_enter(&E->lock); 1261 1262 /* Count up request for entropy in interrupt context. */ 1263 if (cpu_intr_p()) 1264 entropy_extract_intr_evcnt.ev_count++; 1265 1266 /* Wait until there is enough entropy in the system. */ 1267 error = 0; 1268 while (E->needed) { 1269 /* Ask for more, synchronously if possible. */ 1270 entropy_request(len); 1271 1272 /* If we got enough, we're done. */ 1273 if (E->needed == 0) { 1274 KASSERT(error == 0); 1275 break; 1276 } 1277 1278 /* If not waiting, stop here. */ 1279 if (!ISSET(flags, ENTROPY_WAIT)) { 1280 error = EWOULDBLOCK; 1281 break; 1282 } 1283 1284 /* Wait for some entropy to come in and try again. */ 1285 KASSERT(E->stage >= ENTROPY_WARM); 1286 if (ISSET(flags, ENTROPY_SIG)) { 1287 error = cv_wait_sig(&E->cv, &E->lock); 1288 if (error) 1289 break; 1290 } else { 1291 cv_wait(&E->cv, &E->lock); 1292 } 1293 } 1294 1295 /* Count failure -- but fill the buffer nevertheless. */ 1296 if (error) 1297 entropy_extract_fail_evcnt.ev_count++; 1298 1299 /* 1300 * Report a warning if we have never yet reached full entropy. 1301 * This is the only case where we consider entropy to be 1302 * `depleted' without kern.entropy.depletion enabled -- when we 1303 * only have partial entropy, an adversary may be able to 1304 * narrow the state of the pool down to a small number of 1305 * possibilities; the output then enables them to confirm a 1306 * guess, reducing its entropy from the adversary's perspective 1307 * to zero. 1308 */ 1309 if (__predict_false(E->epoch == (unsigned)-1)) { 1310 if (ratecheck(&lasttime, &interval)) 1311 printf("entropy: WARNING:" 1312 " extracting entropy too early\n"); 1313 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY); 1314 } 1315 1316 /* Extract data from the pool, and `deplete' if we're doing that. 
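	 * (With kern.entropy.depletion=1, a successful extraction of len
	 * bytes charges MIN(len, ENTROPY_CAPACITY)*NBBY bits back to the
	 * deficit, capped at the pool capacity.)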
*/ 1317 entpool_extract(&E->pool, buf, len); 1318 if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && 1319 error == 0) { 1320 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; 1321 1322 atomic_store_relaxed(&E->needed, 1323 E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost)); 1324 entropy_deplete_evcnt.ev_count++; 1325 } 1326 1327 /* Release the global lock and return the error. */ 1328 if (E->stage >= ENTROPY_WARM) 1329 mutex_exit(&E->lock); 1330 return error; 1331 } 1332 1333 /* 1334 * entropy_poll(events) 1335 * 1336 * Return the subset of events ready, and if it is not all of 1337 * events, record curlwp as waiting for entropy. 1338 */ 1339 int 1340 entropy_poll(int events) 1341 { 1342 int revents = 0; 1343 1344 KASSERT(E->stage >= ENTROPY_WARM); 1345 1346 /* Always ready for writing. */ 1347 revents |= events & (POLLOUT|POLLWRNORM); 1348 1349 /* Narrow it down to reads. */ 1350 events &= POLLIN|POLLRDNORM; 1351 if (events == 0) 1352 return revents; 1353 1354 /* 1355 * If we have reached full entropy and we're not depleting 1356 * entropy, we are forever ready. 1357 */ 1358 if (__predict_true(atomic_load_relaxed(&E->needed) == 0) && 1359 __predict_true(!atomic_load_relaxed(&entropy_depletion))) 1360 return revents | events; 1361 1362 /* 1363 * Otherwise, check whether we need entropy under the lock. If 1364 * we don't, we're ready; if we do, add ourselves to the queue. 1365 */ 1366 mutex_enter(&E->lock); 1367 if (E->needed == 0) 1368 revents |= events; 1369 else 1370 selrecord(curlwp, &E->selq); 1371 mutex_exit(&E->lock); 1372 1373 return revents; 1374 } 1375 1376 /* 1377 * filt_entropy_read_detach(kn) 1378 * 1379 * struct filterops::f_detach callback for entropy read events: 1380 * remove kn from the list of waiters. 1381 */ 1382 static void 1383 filt_entropy_read_detach(struct knote *kn) 1384 { 1385 1386 KASSERT(E->stage >= ENTROPY_WARM); 1387 1388 mutex_enter(&E->lock); 1389 SLIST_REMOVE(&E->selq.sel_klist, kn, knote, kn_selnext); 1390 mutex_exit(&E->lock); 1391 } 1392 1393 /* 1394 * filt_entropy_read_event(kn, hint) 1395 * 1396 * struct filterops::f_event callback for entropy read events: 1397 * poll for entropy. Caller must hold the global entropy lock if 1398 * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. 1399 */ 1400 static int 1401 filt_entropy_read_event(struct knote *kn, long hint) 1402 { 1403 int ret; 1404 1405 KASSERT(E->stage >= ENTROPY_WARM); 1406 1407 /* Acquire the lock, if caller is outside entropy subsystem. */ 1408 if (hint == NOTE_SUBMIT) 1409 KASSERT(mutex_owned(&E->lock)); 1410 else 1411 mutex_enter(&E->lock); 1412 1413 /* 1414 * If we still need entropy, can't read anything; if not, can 1415 * read arbitrarily much. 1416 */ 1417 if (E->needed != 0) { 1418 ret = 0; 1419 } else { 1420 if (atomic_load_relaxed(&entropy_depletion)) 1421 kn->kn_data = ENTROPY_CAPACITY*NBBY; 1422 else 1423 kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); 1424 ret = 1; 1425 } 1426 1427 /* Release the lock, if caller is outside entropy subsystem. */ 1428 if (hint == NOTE_SUBMIT) 1429 KASSERT(mutex_owned(&E->lock)); 1430 else 1431 mutex_exit(&E->lock); 1432 1433 return ret; 1434 } 1435 1436 static const struct filterops entropy_read_filtops = { 1437 .f_isfd = 1, /* XXX Makes sense only for /dev/u?random. */ 1438 .f_attach = NULL, 1439 .f_detach = filt_entropy_read_detach, 1440 .f_event = filt_entropy_read_event, 1441 }; 1442 1443 /* 1444 * entropy_kqfilter(kn) 1445 * 1446 * Register kn to receive entropy event notifications. 
May be 1447 * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. 1448 */ 1449 int 1450 entropy_kqfilter(struct knote *kn) 1451 { 1452 1453 KASSERT(E->stage >= ENTROPY_WARM); 1454 1455 switch (kn->kn_filter) { 1456 case EVFILT_READ: 1457 /* Enter into the global select queue. */ 1458 mutex_enter(&E->lock); 1459 kn->kn_fop = &entropy_read_filtops; 1460 SLIST_INSERT_HEAD(&E->selq.sel_klist, kn, kn_selnext); 1461 mutex_exit(&E->lock); 1462 return 0; 1463 case EVFILT_WRITE: 1464 /* Can always dump entropy into the system. */ 1465 kn->kn_fop = &seltrue_filtops; 1466 return 0; 1467 default: 1468 return EINVAL; 1469 } 1470 } 1471 1472 /* 1473 * rndsource_setcb(rs, get, getarg) 1474 * 1475 * Set the request callback for the entropy source rs, if it can 1476 * provide entropy on demand. Must precede rnd_attach_source. 1477 */ 1478 void 1479 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *), 1480 void *getarg) 1481 { 1482 1483 rs->get = get; 1484 rs->getarg = getarg; 1485 } 1486 1487 /* 1488 * rnd_attach_source(rs, name, type, flags) 1489 * 1490 * Attach the entropy source rs. Must be done after 1491 * rndsource_setcb, if any, and before any calls to rnd_add_data. 1492 */ 1493 void 1494 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type, 1495 uint32_t flags) 1496 { 1497 uint32_t extra[4]; 1498 unsigned i = 0; 1499 1500 /* Grab cycle counter to mix extra into the pool. */ 1501 extra[i++] = entropy_timer(); 1502 1503 /* 1504 * Apply some standard flags: 1505 * 1506 * - We do not bother with network devices by default, for 1507 * hysterical raisins (perhaps: because it is often the case 1508 * that an adversary can influence network packet timings). 1509 */ 1510 switch (type) { 1511 case RND_TYPE_NET: 1512 flags |= RND_FLAG_NO_COLLECT; 1513 break; 1514 } 1515 1516 /* Sanity-check the callback if RND_FLAG_HASCB is set. */ 1517 KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL); 1518 1519 /* Initialize the random source. */ 1520 memset(rs->name, 0, sizeof(rs->name)); /* paranoia */ 1521 strlcpy(rs->name, name, sizeof(rs->name)); 1522 rs->total = 0; 1523 rs->type = type; 1524 rs->flags = flags; 1525 if (E->stage >= ENTROPY_WARM) 1526 rs->state = percpu_alloc(sizeof(struct rndsource_cpu)); 1527 extra[i++] = entropy_timer(); 1528 1529 /* Wire it into the global list of random sources. */ 1530 if (E->stage >= ENTROPY_WARM) 1531 mutex_enter(&E->lock); 1532 LIST_INSERT_HEAD(&E->sources, rs, list); 1533 if (E->stage >= ENTROPY_WARM) 1534 mutex_exit(&E->lock); 1535 extra[i++] = entropy_timer(); 1536 1537 /* Request that it provide entropy ASAP, if we can. */ 1538 if (ISSET(flags, RND_FLAG_HASCB)) 1539 (*rs->get)(ENTROPY_CAPACITY, rs->getarg); 1540 extra[i++] = entropy_timer(); 1541 1542 /* Mix the extra into the pool. */ 1543 KASSERT(i == __arraycount(extra)); 1544 entropy_enter(extra, sizeof extra, 0); 1545 explicit_memset(extra, 0, sizeof extra); 1546 } 1547 1548 /* 1549 * rnd_detach_source(rs) 1550 * 1551 * Detach the entropy source rs. May sleep waiting for users to 1552 * drain. Further use is not allowed. 1553 */ 1554 void 1555 rnd_detach_source(struct krndsource *rs) 1556 { 1557 1558 /* 1559 * If we're cold (shouldn't happen, but hey), just remove it 1560 * from the list -- there's nothing allocated. 1561 */ 1562 if (E->stage == ENTROPY_COLD) { 1563 LIST_REMOVE(rs, list); 1564 return; 1565 } 1566 1567 /* We may have to wait for entropy_request. */ 1568 ASSERT_SLEEPABLE(); 1569 1570 /* Wait until the source list is not in use, and remove it. 
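	 * (entropy_request marks the list in use via E->sourcelock while
	 * it walks the sources with E->lock dropped, so we must wait for
	 * it to finish before unlinking rs.)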
*/ 1571 mutex_enter(&E->lock); 1572 while (E->sourcelock) 1573 cv_wait(&E->cv, &E->lock); 1574 LIST_REMOVE(rs, list); 1575 mutex_exit(&E->lock); 1576 1577 /* Free the per-CPU data. */ 1578 percpu_free(rs->state, sizeof(struct rndsource_cpu)); 1579 } 1580 1581 /* 1582 * rnd_lock_sources() 1583 * 1584 * Prevent changes to the list of rndsources while we iterate it. 1585 * Interruptible. Caller must hold the global entropy lock. If 1586 * successful, no rndsource will go away until rnd_unlock_sources 1587 * even while the caller releases the global entropy lock. 1588 */ 1589 static int 1590 rnd_lock_sources(void) 1591 { 1592 int error; 1593 1594 KASSERT(mutex_owned(&E->lock)); 1595 1596 while (E->sourcelock) { 1597 error = cv_wait_sig(&E->cv, &E->lock); 1598 if (error) 1599 return error; 1600 } 1601 1602 E->sourcelock = curlwp; 1603 return 0; 1604 } 1605 1606 /* 1607 * rnd_trylock_sources() 1608 * 1609 * Try to lock the list of sources, but if it's already locked, 1610 * fail. Caller must hold the global entropy lock. If 1611 * successful, no rndsource will go away until rnd_unlock_sources 1612 * even while the caller releases the global entropy lock. 1613 */ 1614 static bool 1615 rnd_trylock_sources(void) 1616 { 1617 1618 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1619 1620 if (E->sourcelock) 1621 return false; 1622 E->sourcelock = curlwp; 1623 return true; 1624 } 1625 1626 /* 1627 * rnd_unlock_sources() 1628 * 1629 * Unlock the list of sources after rnd_lock_sources or 1630 * rnd_trylock_sources. Caller must hold the global entropy lock. 1631 */ 1632 static void 1633 rnd_unlock_sources(void) 1634 { 1635 1636 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1637 1638 KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p", 1639 curlwp, E->sourcelock); 1640 E->sourcelock = NULL; 1641 if (E->stage >= ENTROPY_WARM) 1642 cv_broadcast(&E->cv); 1643 } 1644 1645 /* 1646 * rnd_sources_locked() 1647 * 1648 * True if we hold the list of rndsources locked, for diagnostic 1649 * assertions. 1650 */ 1651 static bool __diagused 1652 rnd_sources_locked(void) 1653 { 1654 1655 return E->sourcelock == curlwp; 1656 } 1657 1658 /* 1659 * entropy_request(nbytes) 1660 * 1661 * Request nbytes bytes of entropy from all sources in the system. 1662 * OK if we overdo it. Caller must hold the global entropy lock; 1663 * will release and re-acquire it. 1664 */ 1665 static void 1666 entropy_request(size_t nbytes) 1667 { 1668 struct krndsource *rs; 1669 1670 KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock)); 1671 1672 /* 1673 * If there is a request in progress, let it proceed. 1674 * Otherwise, note that a request is in progress to avoid 1675 * reentry and to block rnd_detach_source until we're done. 1676 */ 1677 if (!rnd_trylock_sources()) 1678 return; 1679 entropy_request_evcnt.ev_count++; 1680 1681 /* Clamp to the maximum reasonable request. */ 1682 nbytes = MIN(nbytes, ENTROPY_CAPACITY); 1683 1684 /* Walk the list of sources. */ 1685 LIST_FOREACH(rs, &E->sources, list) { 1686 /* Skip sources without callbacks. */ 1687 if (!ISSET(rs->flags, RND_FLAG_HASCB)) 1688 continue; 1689 1690 /* 1691 * Skip sources that are disabled altogether -- we 1692 * would just ignore their samples anyway. 1693 */ 1694 if (ISSET(rs->flags, RND_FLAG_NO_COLLECT)) 1695 continue; 1696 1697 /* Drop the lock while we call the callback. 
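	 * (The callback may feed samples back in through rnd_add_data,
	 * which can take E->lock itself, so we cannot hold it across the
	 * call; the list stays stable because we hold E->sourcelock.)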
*/ 1698 if (E->stage >= ENTROPY_WARM) 1699 mutex_exit(&E->lock); 1700 (*rs->get)(nbytes, rs->getarg); 1701 if (E->stage >= ENTROPY_WARM) 1702 mutex_enter(&E->lock); 1703 } 1704 1705 /* Notify rnd_detach_source that the request is done. */ 1706 rnd_unlock_sources(); 1707 } 1708 1709 /* 1710 * rnd_add_uint32(rs, value) 1711 * 1712 * Enter 32 bits of data from an entropy source into the pool. 1713 * 1714 * If rs is NULL, may not be called from interrupt context. 1715 * 1716 * If rs is non-NULL, may be called from any context. May drop 1717 * data if called from interrupt context. 1718 */ 1719 void 1720 rnd_add_uint32(struct krndsource *rs, uint32_t value) 1721 { 1722 1723 rnd_add_data(rs, &value, sizeof value, 0); 1724 } 1725 1726 void 1727 _rnd_add_uint32(struct krndsource *rs, uint32_t value) 1728 { 1729 1730 rnd_add_data(rs, &value, sizeof value, 0); 1731 } 1732 1733 void 1734 _rnd_add_uint64(struct krndsource *rs, uint64_t value) 1735 { 1736 1737 rnd_add_data(rs, &value, sizeof value, 0); 1738 } 1739 1740 /* 1741 * rnd_add_data(rs, buf, len, entropybits) 1742 * 1743 * Enter data from an entropy source into the pool, with a 1744 * driver's estimate of how much entropy the physical source of 1745 * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's 1746 * estimate and treat it as zero. 1747 * 1748 * If rs is NULL, may not be called from interrupt context. 1749 * 1750 * If rs is non-NULL, may be called from any context. May drop 1751 * data if called from interrupt context. 1752 */ 1753 void 1754 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len, 1755 uint32_t entropybits) 1756 { 1757 uint32_t extra; 1758 uint32_t flags; 1759 1760 KASSERTMSG(howmany(entropybits, NBBY) <= len, 1761 "%s: impossible entropy rate:" 1762 " %"PRIu32" bits in %"PRIu32"-byte string", 1763 rs ? rs->name : "(anonymous)", entropybits, len); 1764 1765 /* If there's no rndsource, just enter the data and time now. */ 1766 if (rs == NULL) { 1767 entropy_enter(buf, len, entropybits); 1768 extra = entropy_timer(); 1769 entropy_enter(&extra, sizeof extra, 0); 1770 explicit_memset(&extra, 0, sizeof extra); 1771 return; 1772 } 1773 1774 /* Load a snapshot of the flags. Ioctl may change them under us. */ 1775 flags = atomic_load_relaxed(&rs->flags); 1776 1777 /* 1778 * Skip if: 1779 * - we're not collecting entropy, or 1780 * - the operator doesn't want to collect entropy from this, or 1781 * - neither data nor timings are being collected from this. 1782 */ 1783 if (!atomic_load_relaxed(&entropy_collection) || 1784 ISSET(flags, RND_FLAG_NO_COLLECT) || 1785 !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME)) 1786 return; 1787 1788 /* If asked, ignore the estimate. */ 1789 if (ISSET(flags, RND_FLAG_NO_ESTIMATE)) 1790 entropybits = 0; 1791 1792 /* If we are collecting data, enter them. */ 1793 if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) 1794 rnd_add_data_1(rs, buf, len, entropybits); 1795 1796 /* If we are collecting timings, enter one. */ 1797 if (ISSET(flags, RND_FLAG_COLLECT_TIME)) { 1798 extra = entropy_timer(); 1799 rnd_add_data_1(rs, &extra, sizeof extra, 0); 1800 } 1801 } 1802 1803 /* 1804 * rnd_add_data_1(rs, buf, len, entropybits) 1805 * 1806 * Internal subroutine to call either entropy_enter_intr, if we're 1807 * in interrupt context, or entropy_enter if not, and to count the 1808 * entropy in an rndsource. 
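 *
 *	While the system is hot, the contributed bits are tallied in the
 *	per-CPU rndsource_cpu counter and later summed by
 *	rndsource_entropybits; before that they are added to rs->total
 *	directly, under the global lock once we are warm.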

/*
 * rnd_add_data_1(rs, buf, len, entropybits)
 *
 *	Internal subroutine to call either entropy_enter_intr, if we're
 *	in interrupt context, or entropy_enter if not, and to count the
 *	entropy in an rndsource.
 */
static void
rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{
	bool fullyused;

	/*
	 * If we're in interrupt context, use entropy_enter_intr and
	 * take note of whether it consumed the full sample; if not,
	 * use entropy_enter, which always consumes the full sample.
	 */
	if (curlwp && cpu_intr_p()) {
		fullyused = entropy_enter_intr(buf, len, entropybits);
	} else {
		entropy_enter(buf, len, entropybits);
		fullyused = true;
	}

	/*
	 * If we used the full sample, note how many bits were
	 * contributed from this source.
	 */
	if (fullyused) {
		if (E->stage < ENTROPY_HOT) {
			if (E->stage >= ENTROPY_WARM)
				mutex_enter(&E->lock);
			rs->total += MIN(UINT_MAX - rs->total, entropybits);
			if (E->stage >= ENTROPY_WARM)
				mutex_exit(&E->lock);
		} else {
			struct rndsource_cpu *rc = percpu_getref(rs->state);
			unsigned nbits = rc->rc_nbits;

			nbits += MIN(UINT_MAX - nbits, entropybits);
			atomic_store_relaxed(&rc->rc_nbits, nbits);
			percpu_putref(rs->state);
		}
	}
}

/*
 * rnd_add_data_sync(rs, buf, len, entropybits)
 *
 *	Same as rnd_add_data.  Originally used in rndsource callbacks,
 *	to break an unnecessary cycle; no longer really needed.
 */
void
rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
    uint32_t entropybits)
{

	rnd_add_data(rs, buf, len, entropybits);
}

/*
 * rndsource_entropybits(rs)
 *
 *	Return approximately the number of bits of entropy that have
 *	been contributed via rs so far.  Approximate if other CPUs may
 *	be calling rnd_add_data concurrently.
 */
static unsigned
rndsource_entropybits(struct krndsource *rs)
{
	unsigned nbits = rs->total;

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());
	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
	return nbits;
}

static void
rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct rndsource_cpu *rc = ptr;
	unsigned *nbitsp = cookie;
	unsigned cpu_nbits;

	cpu_nbits = atomic_load_relaxed(&rc->rc_nbits);
	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
}

/*
 * rndsource_to_user(rs, urs)
 *
 *	Copy a description of rs out to urs for userland.
 */
static void
rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urs, 0, sizeof(*urs));

	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
	strlcpy(urs->name, rs->name, sizeof(urs->name));
	urs->total = rndsource_entropybits(rs);
	urs->type = rs->type;
	urs->flags = atomic_load_relaxed(&rs->flags);
}
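
/*
 * Example: the percpu(9) aggregation pattern used by
 * rndsource_entropybits above, reduced to a saturating per-CPU counter.
 * Illustrative sketch only; the foo_* names are hypothetical and the
 * percpu_t is assumed to come from percpu_alloc(sizeof(struct foo_cpu)).
 */
#if 0	/* illustrative sketch, not compiled */
struct foo_cpu {
	unsigned	fc_count;
};

static void
foo_count_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct foo_cpu *fc = ptr;
	unsigned *totalp = cookie;

	/* Saturate rather than wrap around on overflow.  */
	*totalp += MIN(UINT_MAX - *totalp,
	    atomic_load_relaxed(&fc->fc_count));
}

static unsigned
foo_count_total(percpu_t *state)
{
	unsigned total = 0;

	percpu_foreach(state, foo_count_cpu, &total);
	return total;
}
#endif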

/*
 * rndsource_to_user_est(rs, urse)
 *
 *	Copy a description of rs and estimation statistics out to urse
 *	for userland.
 */
static void
rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
{

	KASSERT(E->stage >= ENTROPY_WARM);
	KASSERT(rnd_sources_locked());

	/* Avoid kernel memory disclosure.  */
	memset(urse, 0, sizeof(*urse));

	/* Copy out the rndsource description.  */
	rndsource_to_user(rs, &urse->rt);

	/* Zero out the statistics because we don't do estimation.  */
	urse->dt_samples = 0;
	urse->dt_total = 0;
	urse->dv_samples = 0;
	urse->dv_total = 0;
}

/*
 * entropy_reset_xc(arg1, arg2)
 *
 *	Reset the current CPU's pending entropy to zero.
 */
static void
entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
{
	uint32_t extra = entropy_timer();
	struct entropy_cpu *ec;
	int s;

	/*
	 * Acquire the per-CPU state, blocking soft interrupts and
	 * causing hard interrupts to drop samples on the floor.
	 */
	ec = percpu_getref(entropy_percpu);
	s = splsoftserial();
	KASSERT(!ec->ec_locked);
	ec->ec_locked = true;
	__insn_barrier();

	/* Zero the pending count and enter a cycle count for fun.  */
	ec->ec_pending = 0;
	entpool_enter(ec->ec_pool, &extra, sizeof extra);

	/* Release the per-CPU state.  */
	KASSERT(ec->ec_locked);
	__insn_barrier();
	ec->ec_locked = false;
	splx(s);
	percpu_putref(entropy_percpu);
}

/*
 * entropy_ioctl(cmd, data)
 *
 *	Handle various /dev/random ioctl queries.
 */
int
entropy_ioctl(unsigned long cmd, void *data)
{
	struct krndsource *rs;
	bool privileged = false;
	int error;

	KASSERT(E->stage >= ENTROPY_WARM);

	/* Verify the user's authorization to perform the ioctl.  */
	switch (cmd) {
	case RNDGETENTCNT:
	case RNDGETPOOLSTAT:
	case RNDGETSRCNUM:
	case RNDGETSRCNAME:
	case RNDGETESTNUM:
	case RNDGETESTNAME:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDCTL:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
		break;
	case RNDADDDATA:
		error = kauth_authorize_device(curlwp->l_cred,
		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
		/* Ascertain whether the user's inputs should be counted.  */
		if (kauth_authorize_device(curlwp->l_cred,
			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
			NULL, NULL, NULL, NULL) == 0)
			privileged = true;
		break;
	default: {
		/*
		 * XXX Hack to avoid changing module ABI so this can be
		 * pulled up.  Later, we can just remove the argument.
		 */
		static const struct fileops fops = {
			.fo_ioctl = rnd_system_ioctl,
		};
		struct file f = {
			.f_ops = &fops,
		};
		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
		    enosys(), error);
#if defined(_LP64)
		if (error == ENOSYS)
			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
			    enosys(), error);
#endif
		if (error == ENOSYS)
			error = ENOTTY;
		break;
	}
	}

	/* If anything went wrong with authorization, stop here.  */
	if (error)
		return error;

	/* Dispatch on the command.  */
	switch (cmd) {
	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
		uint32_t *countp = data;

		mutex_enter(&E->lock);
		*countp = ENTROPY_CAPACITY*NBBY - E->needed;
		mutex_exit(&E->lock);

		break;
	}
	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
		rndpoolstat_t *pstat = data;

		mutex_enter(&E->lock);

		/* parameters */
		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
		pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */

		/* state */
		pstat->added = 0; /* XXX total entropy_enter count */
		pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
		pstat->removed = 0; /* XXX total entropy_extract count */
		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
		pstat->generated = 0; /* XXX bits of data...fabricated? */

		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
		rndstat_t *stat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (stat->count == 0)
			break;
		if (stat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == stat->start)
				break;
		}
		while (i < stat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &stat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= stat->count);
		stat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
		rndstat_est_t *estat = data;
		uint32_t start = 0, i = 0;

		/* Skip if none requested; fail if too many requested.  */
		if (estat->count == 0)
			break;
		if (estat->count > RND_MAXSTATCOUNT)
			return EINVAL;

		/*
		 * Under the lock, find the first one, copy out as many
		 * as requested, and report how many we copied out.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (start++ == estat->start)
				break;
		}
		while (i < estat->count && rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &estat->source[i++]);
			mutex_enter(&E->lock);
			rs = LIST_NEXT(rs, list);
		}
		KASSERT(i <= estat->count);
		estat->count = i;
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
		rndstat_name_t *nstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, nstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user(rs, &nstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
		rndstat_est_name_t *enstat = data;
		const size_t n = sizeof(rs->name);

		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));

		/*
		 * Under the lock, search by name.  If found, copy it
		 * out; if not found, fail with ENOENT.
		 */
		mutex_enter(&E->lock);
		error = rnd_lock_sources();
		if (error) {
			mutex_exit(&E->lock);
			return error;
		}
		LIST_FOREACH(rs, &E->sources, list) {
			if (strncmp(rs->name, enstat->name, n) == 0)
				break;
		}
		if (rs != NULL) {
			mutex_exit(&E->lock);
			rndsource_to_user_est(rs, &enstat->source);
			mutex_enter(&E->lock);
		} else {
			error = ENOENT;
		}
		rnd_unlock_sources();
		mutex_exit(&E->lock);
		break;
	}
	case RNDCTL: {		/* Modify entropy source flags.  */
		rndctl_t *rndctl = data;
		const size_t n = sizeof(rs->name);
		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
		uint32_t flags;
		bool reset = false, request = false;

		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));

		/* Whitelist the flags that the user can change.  */
		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;

		/*
		 * For each matching rndsource, either by type if
		 * specified or by name if not, set the masked flags.
		 */
		mutex_enter(&E->lock);
		LIST_FOREACH(rs, &E->sources, list) {
			if (rndctl->type != 0xff) {
				if (rs->type != rndctl->type)
					continue;
			} else {
				if (strncmp(rs->name, rndctl->name, n) != 0)
					continue;
			}
			flags = rs->flags & ~rndctl->mask;
			flags |= rndctl->flags & rndctl->mask;
			if ((rs->flags & resetflags) == 0 &&
			    (flags & resetflags) != 0)
				reset = true;
			if ((rs->flags ^ flags) & resetflags)
				request = true;
			atomic_store_relaxed(&rs->flags, flags);
		}
		mutex_exit(&E->lock);

		/*
		 * If we disabled estimation or collection, nix all the
		 * pending entropy and set needed to the maximum.
		 */
		if (reset) {
			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
			mutex_enter(&E->lock);
			E->pending = 0;
			atomic_store_relaxed(&E->needed,
			    ENTROPY_CAPACITY*NBBY);
			mutex_exit(&E->lock);
		}

		/*
		 * If we changed any of the estimation or collection
		 * flags, request new samples from everyone -- either
		 * to make up for what we just lost, or to get new
		 * samples from what we just added.
		 */
		if (request) {
			mutex_enter(&E->lock);
			entropy_request(ENTROPY_CAPACITY);
			mutex_exit(&E->lock);
		}
		break;
	}
	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
		rnddata_t *rdata = data;
		unsigned entropybits = 0;

		if (!atomic_load_relaxed(&entropy_collection))
			break;	/* thanks but no thanks */
		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
			return EINVAL;

		/*
		 * This ioctl serves as the userland alternative to a
		 * bootloader-provided seed -- typically furnished by
		 * /etc/rc.d/random_seed.  We accept the user's entropy
		 * claim only if
		 *
		 *	(a) the user is privileged, and
		 *	(b) we have not entered a bootloader seed,
		 *
		 * under the assumption that the user may use this to
		 * load a seed from disk that we have already loaded
		 * from the bootloader, so we don't double-count it.
		 */
		if (privileged && rdata->entropy && rdata->len) {
			mutex_enter(&E->lock);
			if (!E->seeded) {
				entropybits = MIN(rdata->entropy,
				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
				E->seeded = true;
			}
			mutex_exit(&E->lock);
		}

		/* Enter the data and consolidate entropy.  */
		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
		    entropybits);
		entropy_consolidate();
		break;
	}
	default:
		error = ENOTTY;
	}

	/* Return any error that may have come up.  */
	return error;
}
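
/*
 * Example: how userland drives the ioctls above, roughly in the manner
 * of rndctl(8).  Illustrative userland sketch only: error handling is
 * abbreviated and the helper name show_and_disable is hypothetical.
 */
#if 0	/* illustrative userland sketch, not compiled here */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/rndio.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
show_and_disable(const char *name)
{
	uint32_t count;
	rndctl_t rctl;
	int fd;

	if ((fd = open("/dev/urandom", O_RDWR)) == -1)
		err(1, "open");

	/* RNDGETENTCNT: current entropy count, in bits.  */
	if (ioctl(fd, RNDGETENTCNT, &count) == -1)
		err(1, "RNDGETENTCNT");
	printf("%u bits of entropy\n", (unsigned)count);

	/* RNDCTL: stop collecting from the named source (needs privilege).  */
	memset(&rctl, 0, sizeof rctl);
	strlcpy(rctl.name, name, sizeof rctl.name);
	rctl.type = 0xff;		/* match by name, not by type */
	rctl.mask = RND_FLAG_NO_COLLECT;
	rctl.flags = RND_FLAG_NO_COLLECT;
	if (ioctl(fd, RNDCTL, &rctl) == -1)
		err(1, "RNDCTL");

	close(fd);
}
#endif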

/* Legacy entry points */

void
rnd_seed(void *seed, size_t len)
{

	if (len != sizeof(rndsave_t)) {
		printf("entropy: invalid seed length: %zu,"
		    " expected sizeof(rndsave_t) = %zu\n",
		    len, sizeof(rndsave_t));
		return;
	}
	entropy_seed(seed);
}

void
rnd_init(void)
{

	entropy_init();
}

void
rnd_init_softint(void)
{

	entropy_init_late();
}

int
rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
{

	return entropy_ioctl(cmd, data);
}