1 /* $NetBSD: subr_cprng.c,v 1.44 2023/08/05 11:21:24 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 2019 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Taylor R. Campbell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * cprng_strong 34 * 35 * Per-CPU NIST Hash_DRBG, reseeded automatically from the entropy 36 * pool when we transition to full entropy, never blocking. This 37 * is slightly different from the old cprng_strong API, but the 38 * only users of the old one fell into three categories: 39 * 40 * 1. never-blocking, oughta-be-per-CPU (kern_cprng, sysctl_prng) 41 * 2. never-blocking, used per-CPU anyway (/dev/urandom short reads) 42 * 3. /dev/random 43 * 44 * This code serves the first two categories without having extra 45 * logic for /dev/random. 46 * 47 * kern_cprng - available at IPL_SOFTSERIAL or lower 48 * user_cprng - available only at IPL_NONE in thread context 49 * 50 * The name kern_cprng is for hysterical raisins. The name 51 * user_cprng serves only to contrast with kern_cprng. 52 */ 53 54 #include <sys/cdefs.h> 55 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.44 2023/08/05 11:21:24 riastradh Exp $"); 56 57 #include <sys/param.h> 58 #include <sys/types.h> 59 #include <sys/cprng.h> 60 #include <sys/cpu.h> 61 #include <sys/entropy.h> 62 #include <sys/errno.h> 63 #include <sys/evcnt.h> 64 #include <sys/intr.h> 65 #include <sys/kmem.h> 66 #include <sys/percpu.h> 67 #include <sys/sysctl.h> 68 #include <sys/systm.h> 69 70 #include <crypto/nist_hash_drbg/nist_hash_drbg.h> 71 72 /* 73 * struct cprng_strong 74 */ 75 struct cprng_strong { 76 struct percpu *cs_percpu; /* struct cprng_cpu */ 77 ipl_cookie_t cs_iplcookie; 78 }; 79 80 /* 81 * struct cprng_cpu 82 * 83 * Per-CPU state for a cprng_strong. The DRBG and evcnt are 84 * allocated separately because percpu(9) sometimes moves per-CPU 85 * objects around without zeroing them. 86 */ 87 struct cprng_cpu { 88 struct nist_hash_drbg *cc_drbg; 89 struct { 90 struct evcnt reseed; 91 } *cc_evcnt; 92 unsigned cc_epoch; 93 }; 94 95 static int sysctl_kern_urandom(SYSCTLFN_ARGS); 96 static int sysctl_kern_arandom(SYSCTLFN_ARGS); 97 static void cprng_init_cpu(void *, void *, struct cpu_info *); 98 static void cprng_fini_cpu(void *, void *, struct cpu_info *); 99 100 /* Well-known CPRNG instances */ 101 struct cprng_strong *kern_cprng __read_mostly; /* IPL_SOFTSERIAL */ 102 struct cprng_strong *user_cprng __read_mostly; /* IPL_NONE */ 103 104 static struct sysctllog *cprng_sysctllog __read_mostly; 105 106 void 107 cprng_init(void) 108 { 109 110 if (__predict_false(nist_hash_drbg_initialize() != 0)) 111 panic("NIST Hash_DRBG failed self-test"); 112 113 /* 114 * Create CPRNG instances at two IPLs: IPL_SOFTSERIAL for 115 * kernel use that may occur inside soft interrupt handlers, 116 * and IPL_NONE for userland use which need not block 117 * interrupts. 118 */ 119 kern_cprng = cprng_strong_create("kern", IPL_SOFTSERIAL, 0); 120 user_cprng = cprng_strong_create("user", IPL_NONE, 0); 121 122 /* Create kern.urandom and kern.arandom sysctl nodes. */ 123 sysctl_createv(&cprng_sysctllog, 0, NULL, NULL, 124 CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT, "urandom", 125 SYSCTL_DESCR("Independent uniform random 32-bit integer"), 126 sysctl_kern_urandom, 0, NULL, 0, CTL_KERN, KERN_URND, CTL_EOL); 127 sysctl_createv(&cprng_sysctllog, 0, NULL, NULL, 128 CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT /*lie*/, "arandom", 129 SYSCTL_DESCR("Independent uniform random bytes, up to 256 bytes"), 130 sysctl_kern_arandom, 0, NULL, 0, CTL_KERN, KERN_ARND, CTL_EOL); 131 } 132 133 /* 134 * sysctl kern.urandom 135 * 136 * Independent uniform random 32-bit integer. Read-only. 137 */ 138 static int 139 sysctl_kern_urandom(SYSCTLFN_ARGS) 140 { 141 struct sysctlnode node = *rnode; 142 int v; 143 int error; 144 145 /* Generate an int's worth of data. */ 146 cprng_strong(user_cprng, &v, sizeof v, 0); 147 148 /* Do the sysctl dance. */ 149 node.sysctl_data = &v; 150 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 151 152 /* Clear the buffer before returning the sysctl error. */ 153 explicit_memset(&v, 0, sizeof v); 154 return error; 155 } 156 157 /* 158 * sysctl kern.arandom 159 * 160 * Independent uniform random bytes, up to 256 bytes. Read-only. 161 */ 162 static int 163 sysctl_kern_arandom(SYSCTLFN_ARGS) 164 { 165 struct sysctlnode node = *rnode; 166 uint8_t buf[256]; 167 int error; 168 169 /* 170 * Clamp to a reasonably small size. 256 bytes is kind of 171 * arbitrary; 32 would be more reasonable, but we used 256 in 172 * the past, so let's not break compatibility. 173 */ 174 if (*oldlenp > 256) /* size_t, so never negative */ 175 *oldlenp = 256; 176 177 /* Generate data. */ 178 cprng_strong(user_cprng, buf, *oldlenp, 0); 179 180 /* Do the sysctl dance. */ 181 node.sysctl_data = buf; 182 node.sysctl_size = *oldlenp; 183 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 184 185 /* Clear the buffer before returning the sysctl error. */ 186 explicit_memset(buf, 0, sizeof buf); 187 return error; 188 } 189 190 struct cprng_strong * 191 cprng_strong_create(const char *name, int ipl, int flags) 192 { 193 struct cprng_strong *cprng; 194 195 cprng = kmem_alloc(sizeof(*cprng), KM_SLEEP); 196 cprng->cs_iplcookie = makeiplcookie(ipl); 197 cprng->cs_percpu = percpu_create(sizeof(struct cprng_cpu), 198 cprng_init_cpu, cprng_fini_cpu, __UNCONST(name)); 199 200 return cprng; 201 } 202 203 void 204 cprng_strong_destroy(struct cprng_strong *cprng) 205 { 206 207 percpu_free(cprng->cs_percpu, sizeof(struct cprng_cpu)); 208 kmem_free(cprng, sizeof(*cprng)); 209 } 210 211 static void 212 cprng_init_cpu(void *ptr, void *cookie, struct cpu_info *ci) 213 { 214 struct cprng_cpu *cc = ptr; 215 const char *name = cookie; 216 const char *cpuname; 217 uint8_t zero[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0}; 218 char namebuf[64]; /* XXX size? */ 219 220 /* 221 * Format the name as, e.g., kern/8 if we're on cpu8. This 222 * doesn't get displayed anywhere; it just ensures that if 223 * there were a bug causing us to use the same otherwise secure 224 * seed on multiple CPUs, we would still get independent output 225 * from the NIST Hash_DRBG. 226 */ 227 snprintf(namebuf, sizeof namebuf, "%s/%u", name, cpu_index(ci)); 228 229 /* 230 * Allocate the struct nist_hash_drbg and struct evcnt 231 * separately, since percpu(9) may move objects around in 232 * memory without zeroing. 233 */ 234 cc->cc_drbg = kmem_zalloc(sizeof(*cc->cc_drbg), KM_SLEEP); 235 cc->cc_evcnt = kmem_alloc(sizeof(*cc->cc_evcnt), KM_SLEEP); 236 237 /* 238 * Initialize the DRBG with no seed. We do this in order to 239 * defer reading from the entropy pool as long as possible. 240 */ 241 if (__predict_false(nist_hash_drbg_instantiate(cc->cc_drbg, 242 zero, sizeof zero, NULL, 0, namebuf, strlen(namebuf)))) 243 panic("nist_hash_drbg_instantiate"); 244 245 /* Attach the event counters. */ 246 /* XXX ci_cpuname may not be initialized early enough. */ 247 cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname; 248 evcnt_attach_dynamic(&cc->cc_evcnt->reseed, EVCNT_TYPE_MISC, NULL, 249 cpuname, "cprng_strong reseed"); 250 251 /* Set the epoch uninitialized so we reseed on first use. */ 252 cc->cc_epoch = 0; 253 } 254 255 static void 256 cprng_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci) 257 { 258 struct cprng_cpu *cc = ptr; 259 260 evcnt_detach(&cc->cc_evcnt->reseed); 261 if (__predict_false(nist_hash_drbg_destroy(cc->cc_drbg))) 262 panic("nist_hash_drbg_destroy"); 263 264 kmem_free(cc->cc_evcnt, sizeof(*cc->cc_evcnt)); 265 kmem_free(cc->cc_drbg, sizeof(*cc->cc_drbg)); 266 } 267 268 static void 269 cprng_strong_reseed(struct cprng_strong *cprng, unsigned epoch, 270 struct cprng_cpu **ccp, int *sp) 271 { 272 uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES]; 273 274 /* 275 * Drop everything to extract a fresh seed from the entropy 276 * pool. entropy_extract may sleep on an adaptive lock, which 277 * invalidates our percpu(9) reference. 278 * 279 * This may race with reseeding in another thread, which is no 280 * big deal -- worst case, we rewind the entropy epoch here and 281 * cause the next caller to reseed again, and in the end we 282 * just reseed a couple more times than necessary. 283 */ 284 splx(*sp); 285 percpu_putref(cprng->cs_percpu); 286 entropy_extract(seed, sizeof seed, 0); 287 *ccp = percpu_getref(cprng->cs_percpu); 288 *sp = splraiseipl(cprng->cs_iplcookie); 289 290 (*ccp)->cc_evcnt->reseed.ev_count++; 291 if (__predict_false(nist_hash_drbg_reseed((*ccp)->cc_drbg, 292 seed, sizeof seed, NULL, 0))) 293 panic("nist_hash_drbg_reseed"); 294 explicit_memset(seed, 0, sizeof seed); 295 (*ccp)->cc_epoch = epoch; 296 } 297 298 size_t 299 cprng_strong(struct cprng_strong *cprng, void *buf, size_t len, int flags) 300 { 301 struct cprng_cpu *cc; 302 unsigned epoch; 303 int s; 304 305 /* Not allowed in hard interrupt context. */ 306 KASSERT(!cpu_intr_p()); 307 308 /* 309 * Verify maximum request length. Caller should really limit 310 * their requests to 32 bytes to avoid spending much time with 311 * preemption disabled -- use the 32 bytes to seed a private 312 * DRBG instance if you need more data. 313 */ 314 KASSERT(len <= CPRNG_MAX_LEN); 315 316 /* Verify legacy API use. */ 317 KASSERT(flags == 0); 318 319 /* Acquire per-CPU state and block interrupts. */ 320 cc = percpu_getref(cprng->cs_percpu); 321 s = splraiseipl(cprng->cs_iplcookie); 322 323 /* If the entropy epoch has changed, (re)seed. */ 324 epoch = entropy_epoch(); 325 if (__predict_false(epoch != cc->cc_epoch)) 326 cprng_strong_reseed(cprng, epoch, &cc, &s); 327 328 /* Generate data. Failure here means it's time to reseed. */ 329 if (__predict_false(nist_hash_drbg_generate(cc->cc_drbg, buf, len, 330 NULL, 0))) { 331 cprng_strong_reseed(cprng, epoch, &cc, &s); 332 if (__predict_false(nist_hash_drbg_generate(cc->cc_drbg, 333 buf, len, NULL, 0))) 334 panic("nist_hash_drbg_generate"); 335 } 336 337 /* Release state and interrupts. */ 338 splx(s); 339 percpu_putref(cprng->cs_percpu); 340 341 /* Return the number of bytes generated, for hysterical raisins. */ 342 return len; 343 } 344 345 uint32_t 346 cprng_strong32(void) 347 { 348 uint32_t r; 349 cprng_strong(kern_cprng, &r, sizeof(r), 0); 350 return r; 351 } 352 353 uint64_t 354 cprng_strong64(void) 355 { 356 uint64_t r; 357 cprng_strong(kern_cprng, &r, sizeof(r), 0); 358 return r; 359 } 360