/*	$NetBSD: subr_cprng.c,v 1.29 2017/12/01 19:05:49 christos Exp $	*/

/*-
 * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Thor Lancelot Simon and Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.29 2017/12/01 19:05:49 christos Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cprng.h>
#include <sys/errno.h>
#include <sys/event.h>		/* XXX struct knote */
#include <sys/fcntl.h>		/* XXX FNONBLOCK */
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/rndsink.h>
#if DIAGNOSTIC
#include <sys/rngtest.h>
#endif

#include <crypto/nist_ctr_drbg/nist_ctr_drbg.h>

#if defined(__HAVE_CPU_COUNTER)
#include <machine/cpu_counter.h>
#endif

static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void	cprng_strong_reseed(struct cprng_strong *);
static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
		    size_t, bool);
#if DIAGNOSTIC
static void	cprng_strong_rngtest(struct cprng_strong *);
#endif

static rndsink_callback_t cprng_strong_rndsink_callback;

void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	nist_ctr_initialize();

	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "urandom",
	    SYSCTL_DESCR("Random integer value"),
	    sysctl_kern_urnd, 0, NULL, 0,
	    CTL_KERN, KERN_URND, CTL_EOL);
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "arandom",
	    SYSCTL_DESCR("n bytes of random data"),
	    sysctl_kern_arnd, 0, NULL, 0,
	    CTL_KERN, KERN_ARND, CTL_EOL);
}

static inline uint32_t
cprng_counter(void)
{
	struct timeval tv;

#if defined(__HAVE_CPU_COUNTER)
	if (cpu_hascounter())
		return cpu_counter32();
#endif
	if (__predict_false(cold)) {
		static int ctr;
		/* microtime unsafe if clock not running yet */
		return ctr++;
	}
	getmicrotime(&tv);
	return (tv.tv_sec * 1000000 + tv.tv_usec);
}

struct cprng_strong {
	char		cs_name[16];
	int		cs_flags;
	kmutex_t	cs_lock;
	percpu_t	*cs_percpu;
	kcondvar_t	cs_cv;
	struct selinfo	cs_selq;
	struct rndsink	*cs_rndsink;
	bool		cs_ready;
	NIST_CTR_DRBG	cs_drbg;

	/* XXX Kludge for /dev/random `information-theoretic' properties.   */
	unsigned int	cs_remaining;
};

struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed));

	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}
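/*
 * Illustrative sketch, not part of the original file: a typical
 * consumer creates a generator once, draws from it as needed, and
 * destroys it on detach.  The names "example_softc" and "sc_cprng"
 * below are hypothetical; only the cprng_strong_* calls are real.
 *
 *	struct example_softc {
 *		struct cprng_strong *sc_cprng;
 *	};
 *
 *	sc->sc_cprng = cprng_strong_create("example", IPL_NONE,
 *	    CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
 *
 *	uint8_t key[32];
 *	(void)cprng_strong(sc->sc_cprng, key, sizeof(key), 0);
 *
 *	cprng_strong_destroy(sc->sc_cprng);
 *
 * With CPRNG_REKEY_ANY set, cprng_strong never blocks, so the return
 * value here is always the full request (32 <= CPRNG_MAX_LEN).
 */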
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}

/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
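/*
 * Illustrative sketch, not part of the original file: because
 * cprng_strong truncates each request to CPRNG_MAX_LEN and may
 * return zero bytes when a non-blocking generator is not yet ready,
 * a caller wanting a large buffer loops over partial results.  The
 * variable "cprng" is assumed to be a previously created generator.
 *
 *	uint8_t buf[4096];
 *	size_t done = 0;
 *
 *	while (done < sizeof(buf)) {
 *		size_t n = cprng_strong(cprng, buf + done,
 *		    sizeof(buf) - done, FNONBLOCK);
 *		if (n == 0)
 *			break;	(not ready yet; retry later)
 *		done += n;
 *	}
 */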
static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}

static int
filt_cprng_read_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cprng = kn->kn_hook;
	int ret;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready) {
		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */
		ret = 1;
	} else {
		ret = 0;
	}
	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_exit(&cprng->cs_lock);

	return ret;
}

static int
filt_cprng_write_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_enter(&cprng->cs_lock);

	kn->kn_data = 0;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_exit(&cprng->cs_lock);

	return 0;
}

static const struct filterops cprng_read_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_read_event,
};

static const struct filterops cprng_write_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_write_event,
};

int
cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
{

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &cprng_read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &cprng_write_filtops;
		break;
	default:
		return EINVAL;
	}

	kn->kn_hook = cprng;
	mutex_enter(&cprng->cs_lock);
	SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
	mutex_exit(&cprng->cs_lock);
	return 0;
}

int
cprng_strong_poll(struct cprng_strong *cprng, int events)
{
	int revents;

	if (!ISSET(events, (POLLIN | POLLRDNORM)))
		return 0;

	mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready) {
		revents = (events & (POLLIN | POLLRDNORM));
	} else {
		selrecord(curlwp, &cprng->cs_selq);
		revents = 0;
	}
	mutex_exit(&cprng->cs_lock);

	return revents;
}
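/*
 * Illustrative sketch, not part of the original file: a character
 * device backed by a cprng_strong can forward its poll entry point
 * here, so readers see the device become readable once the generator
 * has full entropy.  The function name "example_poll" and the softc
 * lookup are hypothetical; a real driver would also handle write
 * events itself, since cprng_strong_poll only reports readability.
 *
 *	static int
 *	example_poll(dev_t dev, int events, struct lwp *l)
 *	{
 *		struct example_softc *sc = ...;
 *
 *		return cprng_strong_poll(sc->sc_cprng, events);
 *	}
 */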
/*
 * XXX Move nist_ctr_drbg_reseed_advised_p and
 * nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make
 * the NIST_CTR_DRBG structure opaque.
 */
static bool
nist_ctr_drbg_reseed_advised_p(NIST_CTR_DRBG *drbg)
{

	return (drbg->reseed_counter > (NIST_CTR_DRBG_RESEED_INTERVAL / 2));
}

static bool
nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg)
{

	return (drbg->reseed_counter >= NIST_CTR_DRBG_RESEED_INTERVAL);
}

/*
 * Generate some data from the underlying generator.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST CTR_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the CTR_DRBG should not fail.
	 */
	if (__predict_false(nist_ctr_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST CTR_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);	/* paranoia */
	}
}

/*
 * Reseed with whatever we can get from the system entropy pool right now.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	explicit_memset(seed, 0, sizeof(seed));
}

/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name);

#if DIAGNOSTIC
	cprng_strong_rngtest(cprng);
#endif
}

#if DIAGNOSTIC
/*
 * Generate some output and apply a statistical RNG test to it.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead?  */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn?  */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b),
		NULL, 0))
		panic("cprng %s: NIST CTR_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good...  */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif

/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
static cprng_strong_t *sysctl_prng;

static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
	    CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}

/*
 * sysctl helper routine for kern.urandom node.  Picks a random number
 * for you.
 */
static int
sysctl_kern_urnd(SYSCTLFN_ARGS)
{
	static ONCE_DECL(control);
	int v, rv;

	RUN_ONCE(&control, makeprng);
	rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
	if (rv == sizeof(v)) {
		struct sysctlnode node = *rnode;
		node.sysctl_data = &v;
		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
	} else
		return (EIO);	/*XXX*/
}

/*
 * sysctl helper routine for kern.arandom node.  Fills the supplied
 * structure with random data for you.
 *
 * This node was originally declared as type "int" but its implementation
 * in OpenBSD, whence it came, would happily return up to 8K of data if
 * requested.  Evidently this was used to key RC4 in userspace.
 *
 * In NetBSD, the libc stack-smash-protection code reads 64 bytes
 * from here at every program startup.  So though it would be nice
 * to make this node return only 32 or 64 bits, we can't.  Too bad!
 */
static int
sysctl_kern_arnd(SYSCTLFN_ARGS)
{
	int error;
	void *v;
	struct sysctlnode node = *rnode;

	switch (*oldlenp) {
	case 0:
		return 0;
	default:
		if (*oldlenp > 256) {
			return E2BIG;
		}
		v = kmem_alloc(*oldlenp, KM_SLEEP);
		cprng_fast(v, *oldlenp);
		node.sysctl_data = v;
		node.sysctl_size = *oldlenp;
		error = sysctl_lookup(SYSCTLFN_CALL(&node));
		kmem_free(v, *oldlenp);
		return error;
	}
}
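/*
 * Illustrative sketch, not part of the original file: a userland
 * program reads the kern.arandom node with sysctl(3); the 256-byte
 * cap above bounds the request size.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *
 *	uint8_t buf[64];
 *	size_t len = sizeof(buf);
 *	int mib[2] = { CTL_KERN, KERN_ARND };
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl(kern.arandom)");
 */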