/*	$NetBSD: subr_cprng.c,v 1.34 2019/12/04 05:36:34 riastradh Exp $	*/

/*-
 * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Thor Lancelot Simon and Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.34 2019/12/04 05:36:34 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cprng.h>
#include <sys/errno.h>
#include <sys/event.h>		/* XXX struct knote */
#include <sys/fcntl.h>		/* XXX FNONBLOCK */
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/rndsink.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#if defined(__HAVE_CPU_COUNTER)
#include <machine/cpu_counter.h>
#endif

static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void	cprng_strong_reseed(struct cprng_strong *);
static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
		    size_t, bool);

static rndsink_callback_t cprng_strong_rndsink_callback;

void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	if (nist_hash_drbg_initialize() != 0)
		panic("NIST Hash_DRBG failed self-test");

	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "urandom",
		       SYSCTL_DESCR("Random integer value"),
		       sysctl_kern_urnd, 0, NULL, 0,
		       CTL_KERN, KERN_URND, CTL_EOL);
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "arandom",
		       SYSCTL_DESCR("n bytes of random data"),
		       sysctl_kern_arnd, 0, NULL, 0,
		       CTL_KERN, KERN_ARND, CTL_EOL);
}

static inline uint32_t
cprng_counter(void)
{
	struct timeval tv;

#if defined(__HAVE_CPU_COUNTER)
	if (cpu_hascounter())
		return cpu_counter32();
#endif
	if (__predict_false(cold)) {
		static int ctr;
		/* microtime unsafe if clock not running yet */
		return ctr++;
	}
	getmicrotime(&tv);
	return (tv.tv_sec * 1000000 + tv.tv_usec);
}

struct cprng_strong {
	char		cs_name[16];
	int		cs_flags;
	kmutex_t	cs_lock;
	percpu_t	*cs_percpu;
	kcondvar_t	cs_cv;
	struct selinfo	cs_selq;
	struct rndsink	*cs_rndsink;
	bool		cs_ready;
	NIST_HASH_DRBG	cs_drbg;

	/* XXX Kludge for /dev/random `information-theoretic' properties.  */
	unsigned int	cs_remaining;
};

struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	memset(cprng->cs_name, 0, sizeof(cprng->cs_name));
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_HASH_DRBG_MIN_SEEDLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_hash_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed));

	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}

void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_hash_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}

/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
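
/*
 * Illustrative sketch only: roughly how a kernel consumer might drive
 * the interface above -- create a generator at IPL_NONE, draw a key,
 * and tear it down.  The name "example", the 32-byte request, and the
 * flag choices are hypothetical; the calls themselves are the ones
 * declared in <sys/cprng.h> and defined in this file.
 *
 *	struct cprng_strong *cprng;
 *	uint8_t key[32];
 *	size_t have;
 *
 *	cprng = cprng_strong_create("example", IPL_NONE, CPRNG_INIT_ANY);
 *	have = cprng_strong(cprng, key, sizeof(key), FNONBLOCK);
 *	if (have != sizeof(key)) {
 *		(nonblocking request made before the generator was fully
 *		seeded; retry later, block, or fall back)
 *	}
 *	...
 *	cprng_strong_destroy(cprng);
 */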

static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}

static int
filt_cprng_read_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cprng = kn->kn_hook;
	int ret;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready) {
		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */
		ret = 1;
	} else {
		ret = 0;
	}
	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_exit(&cprng->cs_lock);

	return ret;
}

static int
filt_cprng_write_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_enter(&cprng->cs_lock);

	kn->kn_data = 0;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_exit(&cprng->cs_lock);

	return 0;
}

static const struct filterops cprng_read_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_read_event,
};

static const struct filterops cprng_write_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_write_event,
};

int
cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
{

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &cprng_read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &cprng_write_filtops;
		break;
	default:
		return EINVAL;
	}

	kn->kn_hook = cprng;
	mutex_enter(&cprng->cs_lock);
	SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
	mutex_exit(&cprng->cs_lock);
	return 0;
}

int
cprng_strong_poll(struct cprng_strong *cprng, int events)
{
	int revents;

	if (!ISSET(events, (POLLIN | POLLRDNORM)))
		return 0;

	mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready) {
		revents = (events & (POLLIN | POLLRDNORM));
	} else {
		selrecord(curlwp, &cprng->cs_selq);
		revents = 0;
	}
	mutex_exit(&cprng->cs_lock);

	return revents;
}
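
/*
 * Illustrative sketch only: a character device backed by a
 * cprng_strong would typically forward its d_poll and d_kqfilter
 * entry points straight to the helpers above.  The names "foo_poll",
 * "foo_kqfilter", and "foo_cprng" are hypothetical.
 *
 *	static int
 *	foo_poll(dev_t dev, int events, struct lwp *l)
 *	{
 *
 *		return cprng_strong_poll(foo_cprng, events);
 *	}
 *
 *	static int
 *	foo_kqfilter(dev_t dev, struct knote *kn)
 *	{
 *
 *		return cprng_strong_kqfilter(foo_cprng, kn);
 *	}
 *
 * which mirrors how the /dev/random pseudodevice consumes them.
 */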

/*
 * XXX Move nist_hash_drbg_reseed_advised_p and
 * nist_hash_drbg_reseed_needed_p into the nist_hash_drbg API and make
 * the NIST_HASH_DRBG structure opaque.
 */
static bool
nist_hash_drbg_reseed_advised_p(NIST_HASH_DRBG *drbg)
{

	return (drbg->reseed_counter > (NIST_HASH_DRBG_RESEED_INTERVAL / 2));
}

static bool
nist_hash_drbg_reseed_needed_p(NIST_HASH_DRBG *drbg)
{

	return (drbg->reseed_counter >= NIST_HASH_DRBG_RESEED_INTERVAL);
}

/*
 * Generate some data from the underlying generator.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST Hash_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the Hash_DRBG should not fail.
	 */
	if (__predict_false(nist_hash_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST Hash_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_hash_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_hash_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}

/*
 * Reseed with whatever we can get from the system entropy pool right now.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	explicit_memset(seed, 0, sizeof(seed));
}

/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_hash_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc,
		sizeof(cc)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG reseed failed",
		    cprng->cs_name);
}

/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
532 * 533 * This node was originally declared as type "int" but its implementation 534 * in OpenBSD, whence it came, would happily return up to 8K of data if 535 * requested. Evidently this was used to key RC4 in userspace. 536 * 537 * In NetBSD, the libc stack-smash-protection code reads 64 bytes 538 * from here at every program startup. Third-party software also often 539 * uses this to obtain a key for CSPRNG, reading 32 bytes or more, while 540 * avoiding the need to open /dev/urandom. 541 */ 542 static int 543 sysctl_kern_arnd(SYSCTLFN_ARGS) 544 { 545 int error; 546 void *v; 547 struct sysctlnode node = *rnode; 548 size_t n __diagused; 549 550 switch (*oldlenp) { 551 case 0: 552 return 0; 553 default: 554 if (*oldlenp > 256) { 555 return E2BIG; 556 } 557 RUN_ONCE(&sysctl_prng_once, makeprng); 558 v = kmem_alloc(*oldlenp, KM_SLEEP); 559 n = cprng_strong(sysctl_prng, v, *oldlenp, 0); 560 KASSERT(n == *oldlenp); 561 node.sysctl_data = v; 562 node.sysctl_size = *oldlenp; 563 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 564 kmem_free(v, *oldlenp); 565 return error; 566 } 567 } 568