xref: /netbsd-src/sys/kern/subr_cprng.c (revision 7330f729ccf0bd976a06f95fad452fe774fc7fd1)
1 /*	$NetBSD: subr_cprng.c,v 1.31 2019/09/02 20:09:30 riastradh Exp $ */
2 
3 /*-
4  * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Thor Lancelot Simon and Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.31 2019/09/02 20:09:30 riastradh Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/types.h>
37 #include <sys/condvar.h>
38 #include <sys/cprng.h>
39 #include <sys/errno.h>
40 #include <sys/event.h>		/* XXX struct knote */
41 #include <sys/fcntl.h>		/* XXX FNONBLOCK */
42 #include <sys/kernel.h>
43 #include <sys/kmem.h>
44 #include <sys/lwp.h>
45 #include <sys/once.h>
46 #include <sys/percpu.h>
47 #include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
48 #include <sys/select.h>
49 #include <sys/systm.h>
50 #include <sys/sysctl.h>
51 #include <sys/rndsink.h>
52 #if DIAGNOSTIC
53 #include <sys/rngtest.h>
54 #endif
55 
56 #include <crypto/nist_hash_drbg/nist_hash_drbg.h>
57 
58 #if defined(__HAVE_CPU_COUNTER)
59 #include <machine/cpu_counter.h>
60 #endif
61 
62 static int sysctl_kern_urnd(SYSCTLFN_PROTO);
63 static int sysctl_kern_arnd(SYSCTLFN_PROTO);
64 
65 static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
66 static void	cprng_strong_reseed(struct cprng_strong *);
67 static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
68 		    size_t, bool);
69 #if DIAGNOSTIC
70 static void	cprng_strong_rngtest(struct cprng_strong *);
71 #endif
72 
73 static rndsink_callback_t	cprng_strong_rndsink_callback;
74 
/*
 * cprng_init: one-time subsystem initialization.  Self-tests the NIST
 * Hash_DRBG implementation and registers the kern.urandom and
 * kern.arandom sysctl nodes.  The cprng backing those nodes is created
 * lazily later (see makeprng) because it is too early here.
 */
void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	/* Refuse to boot with a generator that fails its self-test.  */
	if (nist_hash_drbg_initialize() != 0)
		panic("NIST Hash_DRBG failed self-test");

	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "urandom",
		       SYSCTL_DESCR("Random integer value"),
		       sysctl_kern_urnd, 0, NULL, 0,
		       CTL_KERN, KERN_URND, CTL_EOL);
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "arandom",
		       SYSCTL_DESCR("n bytes of random data"),
		       sysctl_kern_arnd, 0, NULL, 0,
		       CTL_KERN, KERN_ARND, CTL_EOL);
}
96 
97 static inline uint32_t
98 cprng_counter(void)
99 {
100 	struct timeval tv;
101 
102 #if defined(__HAVE_CPU_COUNTER)
103 	if (cpu_hascounter())
104 		return cpu_counter32();
105 #endif
106 	if (__predict_false(cold)) {
107 		static int ctr;
108 		/* microtime unsafe if clock not running yet */
109 		return ctr++;
110 	}
111 	getmicrotime(&tv);
112 	return (tv.tv_sec * 1000000 + tv.tv_usec);
113 }
114 
struct cprng_strong {
	char		cs_name[16];	/* NUL-terminated name for messages */
	int		cs_flags;	/* CPRNG_* flags, fixed at creation */
	kmutex_t	cs_lock;	/* serializes all fields below */
	percpu_t	*cs_percpu;	/* not used in this file -- TODO confirm */
	kcondvar_t	cs_cv;		/* waiters for full entropy (CPRNG_USE_CV) */
	struct selinfo	cs_selq;	/* select/poll/kqueue waiters */
	struct rndsink	*cs_rndsink;	/* entropy source feeding reseeds */
	bool		cs_ready;	/* true iff seeded with full entropy */
	NIST_HASH_DRBG	cs_drbg;	/* underlying deterministic generator */

	/* XXX Kludge for /dev/random `information-theoretic' properties.   */
	unsigned int	cs_remaining;	/* CPRNG_HARD: bytes left before forced reseed */
};
129 
/*
 * Create and seed a strong cprng named `name'.  Its lock is
 * initialized at `ipl'; `flags' is a bitwise-or of CPRNG_* flags.
 * May sleep for allocation.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();	/* personalization input */
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	memset(cprng->cs_name, 0, sizeof(cprng->cs_name));
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_HASH_DRBG_MIN_SEEDLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_hash_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed));	/* don't leave seed on stack */

	/*
	 * CPRNG_HARD generators debit cs_remaining per byte generated,
	 * starting with one seed's worth; others never debit.
	 */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}
177 
/*
 * Tear down a cprng created by cprng_strong_create.  The caller must
 * guarantee there are no remaining users or waiters.
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_hash_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
200 
201 /*
202  * Generate some data from cprng.  Block or return zero bytes,
203  * depending on flags & FNONBLOCK, if cprng was created without
204  * CPRNG_REKEY_ANY.
205  */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Reseed with whatever entropy is available right now.  */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Wait for full entropy.  Give up (returning 0 bytes)
		 * if nonblocking, if this cprng does not use the
		 * condvar, or if the sleep is interrupted by a signal.
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/*
			 * Budget exhausted: truncate the request to
			 * what is left, restart the budget, and make
			 * the next request reseed first.
			 */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
259 
/*
 * kqueue detach: unhook a knote from the cprng's klist, under the
 * cprng lock.
 */
static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}
269 
270 static int
271 filt_cprng_read_event(struct knote *kn, long hint)
272 {
273 	struct cprng_strong *const cprng = kn->kn_hook;
274 	int ret;
275 
276 	if (hint == NOTE_SUBMIT)
277 		KASSERT(mutex_owned(&cprng->cs_lock));
278 	else
279 		mutex_enter(&cprng->cs_lock);
280 	if (cprng->cs_ready) {
281 		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */
282 		ret = 1;
283 	} else {
284 		ret = 0;
285 	}
286 	if (hint == NOTE_SUBMIT)
287 		KASSERT(mutex_owned(&cprng->cs_lock));
288 	else
289 		mutex_exit(&cprng->cs_lock);
290 
291 	return ret;
292 }
293 
294 static int
295 filt_cprng_write_event(struct knote *kn, long hint)
296 {
297 	struct cprng_strong *const cprng = kn->kn_hook;
298 
299 	if (hint == NOTE_SUBMIT)
300 		KASSERT(mutex_owned(&cprng->cs_lock));
301 	else
302 		mutex_enter(&cprng->cs_lock);
303 
304 	kn->kn_data = 0;
305 
306 	if (hint == NOTE_SUBMIT)
307 		KASSERT(mutex_owned(&cprng->cs_lock));
308 	else
309 		mutex_exit(&cprng->cs_lock);
310 
311 	return 0;
312 }
313 
/* kqueue EVFILT_READ ops: readable when the cprng has full entropy.  */
static const struct filterops cprng_read_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_read_event,
};
320 
/* kqueue EVFILT_WRITE ops: never reports writability.  */
static const struct filterops cprng_write_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_write_event,
};
327 
328 int
329 cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
330 {
331 
332 	switch (kn->kn_filter) {
333 	case EVFILT_READ:
334 		kn->kn_fop = &cprng_read_filtops;
335 		break;
336 	case EVFILT_WRITE:
337 		kn->kn_fop = &cprng_write_filtops;
338 		break;
339 	default:
340 		return EINVAL;
341 	}
342 
343 	kn->kn_hook = cprng;
344 	mutex_enter(&cprng->cs_lock);
345 	SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
346 	mutex_exit(&cprng->cs_lock);
347 	return 0;
348 }
349 
350 int
351 cprng_strong_poll(struct cprng_strong *cprng, int events)
352 {
353 	int revents;
354 
355 	if (!ISSET(events, (POLLIN | POLLRDNORM)))
356 		return 0;
357 
358 	mutex_enter(&cprng->cs_lock);
359 	if (cprng->cs_ready) {
360 		revents = (events & (POLLIN | POLLRDNORM));
361 	} else {
362 		selrecord(curlwp, &cprng->cs_selq);
363 		revents = 0;
364 	}
365 	mutex_exit(&cprng->cs_lock);
366 
367 	return revents;
368 }
369 
370 /*
371  * XXX Move nist_hash_drbg_reseed_advised_p and
372  * nist_hash_drbg_reseed_needed_p into the nist_hash_drbg API and make
373  * the NIST_HASH_DRBG structure opaque.
374  */
/*
 * True when the generator is past half its reseed interval, i.e. it
 * would be wise to schedule fresh entropy soon.
 */
static bool
nist_hash_drbg_reseed_advised_p(NIST_HASH_DRBG *drbg)
{

	return (drbg->reseed_counter > (NIST_HASH_DRBG_RESEED_INTERVAL / 2));
}
381 
/*
 * True when the generator has reached its reseed interval and must
 * not produce more output before a reseed.
 */
static bool
nist_hash_drbg_reseed_needed_p(NIST_HASH_DRBG *drbg)
{

	return (drbg->reseed_counter >= NIST_HASH_DRBG_RESEED_INTERVAL);
}
388 
389 /*
390  * Generate some data from the underlying generator.
391  */
/*
 * Generate `bytes' (at most CPRNG_MAX_LEN) of output into `buffer'
 * from the underlying generator.  Called with the cprng lock held.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();	/* additional input */

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST Hash_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the Hash_DRBG should not fail.
	 */
	if (__predict_false(nist_hash_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST Hash_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_hash_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_hash_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}
426 
427 /*
428  * Reseed with whatever we can get from the system entropy pool right now.
429  */
/*
 * Reseed with whatever we can get from the system entropy pool right now.
 * Called with the cprng lock held.  The seed may carry only partial
 * entropy; rndsink_request's return says which.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	explicit_memset(seed, 0, sizeof(seed));	/* don't leave seed on stack */
}
442 
443 /*
444  * Reseed with the given seed.  If we now have full entropy, notify waiters.
445  */
/*
 * Reseed with the given seed (which must be exactly one minimum
 * seed's worth).  If we now have full entropy, notify waiters.
 * Called with the cprng lock held.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();	/* additional input */

	KASSERT(bytes == NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_hash_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc,
		sizeof(cc)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG reseed failed",
		    cprng->cs_name);

#if DIAGNOSTIC
	cprng_strong_rngtest(cprng);
#endif
}
490 
#if DIAGNOSTIC
/*
 * Generate some output and apply a statistical RNG test to it.
 * Called with the cprng lock held, after a reseed.  Best-effort:
 * silently skipped if the test buffer cannot be allocated.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead?  */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn?  */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	if (nist_hash_drbg_generate(&cprng->cs_drbg, rt->rt_b,
		sizeof(rt->rt_b), NULL, 0))
		panic("cprng %s: NIST Hash_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good...  */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif
526 
527 /*
528  * Feed entropy from an rndsink request into the CPRNG for which the
529  * request was issued.
530  */
/*
 * rndsink callback: feed entropy from a completed rndsink request
 * into the CPRNG for which the request was issued (`context').
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
541 
/* Lazily-created generator backing the kern.urandom sysctl node.  */
static cprng_strong_t *sysctl_prng;

/*
 * ONCE_DECL constructor for sysctl_prng.  Always returns 0 so
 * RUN_ONCE cannot fail.
 */
static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
					  CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}
553 
554 /*
555  * sysctl helper routine for kern.urandom node. Picks a random number
556  * for you.
557  */
558 static int
559 sysctl_kern_urnd(SYSCTLFN_ARGS)
560 {
561 	static ONCE_DECL(control);
562 	int v, rv;
563 
564 	RUN_ONCE(&control, makeprng);
565 	rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
566 	if (rv == sizeof(v)) {
567 		struct sysctlnode node = *rnode;
568 		node.sysctl_data = &v;
569 		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
570 	}
571 	else
572 		return (EIO);	/*XXX*/
573 }
574 
575 /*
576  * sysctl helper routine for kern.arandom node.  Fills the supplied
577  * structure with random data for you.
578  *
579  * This node was originally declared as type "int" but its implementation
580  * in OpenBSD, whence it came, would happily return up to 8K of data if
581  * requested.  Evidently this was used to key RC4 in userspace.
582  *
583  * In NetBSD, the libc stack-smash-protection code reads 64 bytes
584  * from here at every program startup.  So though it would be nice
585  * to make this node return only 32 or 64 bits, we can't.  Too bad!
586  */
587 static int
588 sysctl_kern_arnd(SYSCTLFN_ARGS)
589 {
590 	int error;
591 	void *v;
592 	struct sysctlnode node = *rnode;
593 
594 	switch (*oldlenp) {
595 	    case 0:
596 		return 0;
597 	    default:
598 		if (*oldlenp > 256) {
599 			return E2BIG;
600 		}
601 		v = kmem_alloc(*oldlenp, KM_SLEEP);
602 		cprng_fast(v, *oldlenp);
603 		node.sysctl_data = v;
604 		node.sysctl_size = *oldlenp;
605 		error = sysctl_lookup(SYSCTLFN_CALL(&node));
606 		kmem_free(v, *oldlenp);
607 		return error;
608 	}
609 }
610