/*	$NetBSD: subr_cprng.c,v 1.25 2014/08/14 16:28:30 riastradh Exp $	*/

/*-
 * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Thor Lancelot Simon and Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.25 2014/08/14 16:28:30 riastradh Exp $"); 34 35 #include <sys/param.h> 36 #include <sys/types.h> 37 #include <sys/condvar.h> 38 #include <sys/cprng.h> 39 #include <sys/errno.h> 40 #include <sys/event.h> /* XXX struct knote */ 41 #include <sys/fcntl.h> /* XXX FNONBLOCK */ 42 #include <sys/kernel.h> 43 #include <sys/kmem.h> 44 #include <sys/lwp.h> 45 #include <sys/once.h> 46 #include <sys/percpu.h> 47 #include <sys/poll.h> /* XXX POLLIN/POLLOUT/&c. */ 48 #include <sys/select.h> 49 #include <sys/systm.h> 50 #include <sys/sysctl.h> 51 #include <sys/rnd.h> 52 #include <sys/rndsink.h> 53 #if DEBUG 54 #include <sys/rngtest.h> 55 #endif 56 57 #include <crypto/nist_ctr_drbg/nist_ctr_drbg.h> 58 59 #if defined(__HAVE_CPU_COUNTER) 60 #include <machine/cpu_counter.h> 61 #endif 62 63 static int sysctl_kern_urnd(SYSCTLFN_PROTO); 64 static int sysctl_kern_arnd(SYSCTLFN_PROTO); 65 66 static void cprng_strong_generate(struct cprng_strong *, void *, size_t); 67 static void cprng_strong_reseed(struct cprng_strong *); 68 static void cprng_strong_reseed_from(struct cprng_strong *, const void *, 69 size_t, bool); 70 #if DEBUG 71 static void cprng_strong_rngtest(struct cprng_strong *); 72 #endif 73 74 static rndsink_callback_t cprng_strong_rndsink_callback; 75 76 void 77 cprng_init(void) 78 { 79 static struct sysctllog *random_sysctllog; 80 81 nist_ctr_initialize(); 82 83 sysctl_createv(&random_sysctllog, 0, NULL, NULL, 84 CTLFLAG_PERMANENT, 85 CTLTYPE_INT, "urandom", 86 SYSCTL_DESCR("Random integer value"), 87 sysctl_kern_urnd, 0, NULL, 0, 88 CTL_KERN, KERN_URND, CTL_EOL); 89 sysctl_createv(&random_sysctllog, 0, NULL, NULL, 90 CTLFLAG_PERMANENT, 91 CTLTYPE_INT, "arandom", 92 SYSCTL_DESCR("n bytes of random data"), 93 sysctl_kern_arnd, 0, NULL, 0, 94 CTL_KERN, KERN_ARND, CTL_EOL); 95 } 96 97 static inline uint32_t 98 cprng_counter(void) 99 { 100 struct timeval tv; 101 102 #if defined(__HAVE_CPU_COUNTER) 103 
if (cpu_hascounter()) 104 return cpu_counter32(); 105 #endif 106 if (__predict_false(cold)) { 107 static int ctr; 108 /* microtime unsafe if clock not running yet */ 109 return ctr++; 110 } 111 getmicrotime(&tv); 112 return (tv.tv_sec * 1000000 + tv.tv_usec); 113 } 114 115 struct cprng_strong { 116 char cs_name[16]; 117 int cs_flags; 118 kmutex_t cs_lock; 119 percpu_t *cs_percpu; 120 kcondvar_t cs_cv; 121 struct selinfo cs_selq; 122 struct rndsink *cs_rndsink; 123 bool cs_ready; 124 NIST_CTR_DRBG cs_drbg; 125 126 /* XXX Kludge for /dev/random `information-theoretic' properties. */ 127 unsigned int cs_remaining; 128 }; 129 130 struct cprng_strong * 131 cprng_strong_create(const char *name, int ipl, int flags) 132 { 133 const uint32_t cc = cprng_counter(); 134 struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng), 135 KM_SLEEP); 136 137 /* 138 * rndsink_request takes a spin lock at IPL_VM, so we can be no 139 * higher than that. 140 */ 141 KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH); 142 143 /* Initialize the easy fields. */ 144 (void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name)); 145 cprng->cs_flags = flags; 146 mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl); 147 cv_init(&cprng->cs_cv, cprng->cs_name); 148 selinit(&cprng->cs_selq); 149 cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES, 150 &cprng_strong_rndsink_callback, cprng); 151 152 /* Get some initial entropy. Record whether it is full entropy. */ 153 uint8_t seed[NIST_BLOCK_KEYLEN_BYTES]; 154 mutex_enter(&cprng->cs_lock); 155 cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed, 156 sizeof(seed)); 157 if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed), 158 &cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name))) 159 /* XXX Fix nist_ctr_drbg API so this can't happen. 
*/ 160 panic("cprng %s: NIST CTR_DRBG instantiation failed", 161 cprng->cs_name); 162 explicit_memset(seed, 0, sizeof(seed)); 163 164 if (ISSET(flags, CPRNG_HARD)) 165 cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES; 166 else 167 cprng->cs_remaining = 0; 168 169 if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY)) 170 printf("cprng %s: creating with partial entropy\n", 171 cprng->cs_name); 172 mutex_exit(&cprng->cs_lock); 173 174 return cprng; 175 } 176 177 void 178 cprng_strong_destroy(struct cprng_strong *cprng) 179 { 180 181 /* 182 * Destroy the rndsink first to prevent calls to the callback. 183 */ 184 rndsink_destroy(cprng->cs_rndsink); 185 186 KASSERT(!cv_has_waiters(&cprng->cs_cv)); 187 #if 0 188 KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */ 189 #endif 190 191 nist_ctr_drbg_destroy(&cprng->cs_drbg); 192 seldestroy(&cprng->cs_selq); 193 cv_destroy(&cprng->cs_cv); 194 mutex_destroy(&cprng->cs_lock); 195 196 explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */ 197 kmem_free(cprng, sizeof(*cprng)); 198 } 199 200 /* 201 * Generate some data from cprng. Block or return zero bytes, 202 * depending on flags & FNONBLOCK, if cprng was created without 203 * CPRNG_REKEY_ANY. 204 */ 205 size_t 206 cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags) 207 { 208 size_t result; 209 210 /* Caller must loop for more than CPRNG_MAX_LEN bytes. */ 211 bytes = MIN(bytes, CPRNG_MAX_LEN); 212 213 mutex_enter(&cprng->cs_lock); 214 215 if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) { 216 if (!cprng->cs_ready) 217 cprng_strong_reseed(cprng); 218 } else { 219 while (!cprng->cs_ready) { 220 if (ISSET(flags, FNONBLOCK) || 221 !ISSET(cprng->cs_flags, CPRNG_USE_CV) || 222 cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) { 223 result = 0; 224 goto out; 225 } 226 } 227 } 228 229 /* 230 * Debit the entropy if requested. 231 * 232 * XXX Kludge for /dev/random `information-theoretic' properties. 
233 */ 234 if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) { 235 KASSERT(0 < cprng->cs_remaining); 236 KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES); 237 if (bytes < cprng->cs_remaining) { 238 cprng->cs_remaining -= bytes; 239 } else { 240 bytes = cprng->cs_remaining; 241 cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES; 242 cprng->cs_ready = false; 243 rndsink_schedule(cprng->cs_rndsink); 244 } 245 KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES); 246 KASSERT(0 < cprng->cs_remaining); 247 KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES); 248 } 249 250 cprng_strong_generate(cprng, buffer, bytes); 251 result = bytes; 252 253 out: mutex_exit(&cprng->cs_lock); 254 return result; 255 } 256 257 static void filt_cprng_detach(struct knote *); 258 static int filt_cprng_event(struct knote *, long); 259 260 static const struct filterops cprng_filtops = 261 { 1, NULL, filt_cprng_detach, filt_cprng_event }; 262 263 int 264 cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn) 265 { 266 267 switch (kn->kn_filter) { 268 case EVFILT_READ: 269 kn->kn_fop = &cprng_filtops; 270 kn->kn_hook = cprng; 271 mutex_enter(&cprng->cs_lock); 272 SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext); 273 mutex_exit(&cprng->cs_lock); 274 return 0; 275 276 case EVFILT_WRITE: 277 default: 278 return EINVAL; 279 } 280 } 281 282 static void 283 filt_cprng_detach(struct knote *kn) 284 { 285 struct cprng_strong *const cprng = kn->kn_hook; 286 287 mutex_enter(&cprng->cs_lock); 288 SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext); 289 mutex_exit(&cprng->cs_lock); 290 } 291 292 static int 293 filt_cprng_event(struct knote *kn, long hint) 294 { 295 struct cprng_strong *const cprng = kn->kn_hook; 296 int ret; 297 298 if (hint == NOTE_SUBMIT) 299 KASSERT(mutex_owned(&cprng->cs_lock)); 300 else 301 mutex_enter(&cprng->cs_lock); 302 if (cprng->cs_ready) { 303 kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large? 
*/ 304 ret = 1; 305 } else { 306 ret = 0; 307 } 308 if (hint == NOTE_SUBMIT) 309 KASSERT(mutex_owned(&cprng->cs_lock)); 310 else 311 mutex_exit(&cprng->cs_lock); 312 313 return ret; 314 } 315 316 int 317 cprng_strong_poll(struct cprng_strong *cprng, int events) 318 { 319 int revents; 320 321 if (!ISSET(events, (POLLIN | POLLRDNORM))) 322 return 0; 323 324 mutex_enter(&cprng->cs_lock); 325 if (cprng->cs_ready) { 326 revents = (events & (POLLIN | POLLRDNORM)); 327 } else { 328 selrecord(curlwp, &cprng->cs_selq); 329 revents = 0; 330 } 331 mutex_exit(&cprng->cs_lock); 332 333 return revents; 334 } 335 336 /* 337 * XXX Move nist_ctr_drbg_reseed_advised_p and 338 * nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make 339 * the NIST_CTR_DRBG structure opaque. 340 */ 341 static bool 342 nist_ctr_drbg_reseed_advised_p(NIST_CTR_DRBG *drbg) 343 { 344 345 return (drbg->reseed_counter > (NIST_CTR_DRBG_RESEED_INTERVAL / 2)); 346 } 347 348 static bool 349 nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg) 350 { 351 352 return (drbg->reseed_counter >= NIST_CTR_DRBG_RESEED_INTERVAL); 353 } 354 355 /* 356 * Generate some data from the underlying generator. 357 */ 358 static void 359 cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes) 360 { 361 const uint32_t cc = cprng_counter(); 362 363 KASSERT(bytes <= CPRNG_MAX_LEN); 364 KASSERT(mutex_owned(&cprng->cs_lock)); 365 366 /* 367 * Generate some data from the NIST CTR_DRBG. Caller 368 * guarantees reseed if we're not ready, and if we exhaust the 369 * generator, we mark ourselves not ready. Consequently, this 370 * call to the CTR_DRBG should not fail. 371 */ 372 if (__predict_false(nist_ctr_drbg_generate(&cprng->cs_drbg, buffer, 373 bytes, &cc, sizeof(cc)))) 374 panic("cprng %s: NIST CTR_DRBG failed", cprng->cs_name); 375 376 /* 377 * If we've been seeing a lot of use, ask for some fresh 378 * entropy soon. 
379 */ 380 if (__predict_false(nist_ctr_drbg_reseed_advised_p(&cprng->cs_drbg))) 381 rndsink_schedule(cprng->cs_rndsink); 382 383 /* 384 * If we just exhausted the generator, inform the next user 385 * that we need a reseed. 386 */ 387 if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) { 388 cprng->cs_ready = false; 389 rndsink_schedule(cprng->cs_rndsink); /* paranoia */ 390 } 391 } 392 393 /* 394 * Reseed with whatever we can get from the system entropy pool right now. 395 */ 396 static void 397 cprng_strong_reseed(struct cprng_strong *cprng) 398 { 399 uint8_t seed[NIST_BLOCK_KEYLEN_BYTES]; 400 401 KASSERT(mutex_owned(&cprng->cs_lock)); 402 403 const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed, 404 sizeof(seed)); 405 cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy); 406 explicit_memset(seed, 0, sizeof(seed)); 407 } 408 409 /* 410 * Reseed with the given seed. If we now have full entropy, notify waiters. 411 */ 412 static void 413 cprng_strong_reseed_from(struct cprng_strong *cprng, 414 const void *seed, size_t bytes, bool full_entropy) 415 { 416 const uint32_t cc = cprng_counter(); 417 418 KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES); 419 KASSERT(mutex_owned(&cprng->cs_lock)); 420 421 /* 422 * Notify anyone interested in the partiality of entropy in our 423 * seed -- anyone waiting for full entropy, or any system 424 * operators interested in knowing when the entropy pool is 425 * running on fumes. 426 */ 427 if (full_entropy) { 428 if (!cprng->cs_ready) { 429 cprng->cs_ready = true; 430 cv_broadcast(&cprng->cs_cv); 431 selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM), 432 NOTE_SUBMIT); 433 } 434 } else { 435 /* 436 * XXX Is there is any harm in reseeding with partial 437 * entropy when we had full entropy before? If so, 438 * remove the conditional on this message. 
439 */ 440 if (!cprng->cs_ready && 441 !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) 442 printf("cprng %s: reseeding with partial entropy\n", 443 cprng->cs_name); 444 } 445 446 if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc))) 447 /* XXX Fix nist_ctr_drbg API so this can't happen. */ 448 panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name); 449 450 #if DEBUG 451 cprng_strong_rngtest(cprng); 452 #endif 453 } 454 455 #if DEBUG 456 /* 457 * Generate some output and apply a statistical RNG test to it. 458 */ 459 static void 460 cprng_strong_rngtest(struct cprng_strong *cprng) 461 { 462 463 KASSERT(mutex_owned(&cprng->cs_lock)); 464 465 /* XXX Switch to a pool cache instead? */ 466 rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP); 467 if (rt == NULL) 468 /* XXX Warn? */ 469 return; 470 471 (void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name)); 472 473 if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b), 474 NULL, 0)) 475 panic("cprng %s: NIST CTR_DRBG failed after reseed", 476 cprng->cs_name); 477 478 if (rngtest(rt)) { 479 printf("cprng %s: failed statistical RNG test\n", 480 cprng->cs_name); 481 /* XXX Not clear that this does any good... */ 482 cprng->cs_ready = false; 483 rndsink_schedule(cprng->cs_rndsink); 484 } 485 486 explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */ 487 kmem_intr_free(rt, sizeof(*rt)); 488 } 489 #endif 490 491 /* 492 * Feed entropy from an rndsink request into the CPRNG for which the 493 * request was issued. 494 */ 495 static void 496 cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes) 497 { 498 struct cprng_strong *const cprng = context; 499 500 mutex_enter(&cprng->cs_lock); 501 /* Assume that rndsinks provide only full-entropy output. 
*/ 502 cprng_strong_reseed_from(cprng, seed, bytes, true); 503 mutex_exit(&cprng->cs_lock); 504 } 505 506 static cprng_strong_t *sysctl_prng; 507 508 static int 509 makeprng(void) 510 { 511 512 /* can't create in cprng_init(), too early */ 513 sysctl_prng = cprng_strong_create("sysctl", IPL_NONE, 514 CPRNG_INIT_ANY|CPRNG_REKEY_ANY); 515 return 0; 516 } 517 518 /* 519 * sysctl helper routine for kern.urandom node. Picks a random number 520 * for you. 521 */ 522 static int 523 sysctl_kern_urnd(SYSCTLFN_ARGS) 524 { 525 static ONCE_DECL(control); 526 int v, rv; 527 528 RUN_ONCE(&control, makeprng); 529 rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0); 530 if (rv == sizeof(v)) { 531 struct sysctlnode node = *rnode; 532 node.sysctl_data = &v; 533 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 534 } 535 else 536 return (EIO); /*XXX*/ 537 } 538 539 /* 540 * sysctl helper routine for kern.arandom node. Fills the supplied 541 * structure with random data for you. 542 * 543 * This node was originally declared as type "int" but its implementation 544 * in OpenBSD, whence it came, would happily return up to 8K of data if 545 * requested. Evidently this was used to key RC4 in userspace. 546 * 547 * In NetBSD, the libc stack-smash-protection code reads 64 bytes 548 * from here at every program startup. So though it would be nice 549 * to make this node return only 32 or 64 bits, we can't. Too bad! 550 */ 551 static int 552 sysctl_kern_arnd(SYSCTLFN_ARGS) 553 { 554 int error; 555 void *v; 556 struct sysctlnode node = *rnode; 557 558 switch (*oldlenp) { 559 case 0: 560 return 0; 561 default: 562 if (*oldlenp > 256) { 563 return E2BIG; 564 } 565 v = kmem_alloc(*oldlenp, KM_SLEEP); 566 cprng_fast(v, *oldlenp); 567 node.sysctl_data = v; 568 node.sysctl_size = *oldlenp; 569 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 570 kmem_free(v, *oldlenp); 571 return error; 572 } 573 } 574