xref: /netbsd-src/sys/kern/subr_cprng.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /*	$NetBSD: subr_cprng.c,v 1.22 2013/07/27 11:19:09 skrll Exp $ */
2 
3 /*-
4  * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Thor Lancelot Simon and Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.22 2013/07/27 11:19:09 skrll Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/types.h>
37 #include <sys/condvar.h>
38 #include <sys/cprng.h>
39 #include <sys/errno.h>
40 #include <sys/event.h>		/* XXX struct knote */
41 #include <sys/fcntl.h>		/* XXX FNONBLOCK */
42 #include <sys/kernel.h>
43 #include <sys/kmem.h>
44 #include <sys/lwp.h>
45 #include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
46 #include <sys/select.h>
47 #include <sys/systm.h>
48 #include <sys/rnd.h>
49 #include <sys/rndsink.h>
50 #if DEBUG
51 #include <sys/rngtest.h>
52 #endif
53 
54 #include <crypto/nist_ctr_drbg/nist_ctr_drbg.h>
55 
56 #if defined(__HAVE_CPU_COUNTER)
57 #include <machine/cpu_counter.h>
58 #endif
59 
/* Internal operations on a strong cprng instance (all require cs_lock). */
static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void	cprng_strong_reseed(struct cprng_strong *);
static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
		    size_t, bool);
#if DEBUG
static void	cprng_strong_rngtest(struct cprng_strong *);
#endif

/* Invoked by the rndsink layer when requested entropy becomes available. */
static rndsink_callback_t	cprng_strong_rndsink_callback;
/*
 * One-time subsystem initialization: set up the NIST CTR_DRBG
 * implementation before any cprng instance can be created.
 */
void
cprng_init(void)
{
	nist_ctr_initialize();
}
75 
/*
 * Return a cheap 32-bit value used as additional input ("nonce") for
 * the DRBG: the CPU cycle counter when the machine has one, otherwise
 * the current time in microseconds.  Returns 0 during early boot,
 * before the clock is running.  This is a uniquifier, not entropy.
 */
static inline uint32_t
cprng_counter(void)
{
	struct timeval tv;

#if defined(__HAVE_CPU_COUNTER)
	if (cpu_hascounter())
		return cpu_counter32();
#endif
	if (__predict_false(cold)) {
		/* microtime unsafe if clock not running yet */
		return 0;
	}
	microtime(&tv);
	/*
	 * NOTE(review): the tv_sec multiply can exceed 32 bits and the
	 * result is truncated on return; assumed benign since only a
	 * per-call uniquifier is needed -- confirm time_t width here.
	 */
	return (tv.tv_sec * 1000000 + tv.tv_usec);
}
92 
/*
 * State of one strong cprng instance.  Mutable fields are serialized
 * by cs_lock; cs_name and cs_flags are fixed at creation time.
 */
struct cprng_strong {
	char		cs_name[16];	/* for diagnostics and cv wmesg */
	int		cs_flags;	/* CPRNG_* flags given at creation */
	kmutex_t	cs_lock;	/* protects the fields below */
	kcondvar_t	cs_cv;		/* sleepers waiting for full entropy */
	struct selinfo	cs_selq;	/* poll/select/kqueue waiters */
	struct rndsink	*cs_rndsink;	/* entropy-pool request handle */
	bool		cs_ready;	/* true iff seeded with full entropy */
	NIST_CTR_DRBG	cs_drbg;	/* the underlying generator state */

	/* XXX Kludge for /dev/random `information-theoretic' properties.   */
	unsigned int	cs_remaining;	/* output bytes budgeted before the
					 * next forced reseed (CPRNG_HARD) */
};
106 
/*
 * Create, initialize, and seed a strong cprng named `name' for use at
 * interrupt priority `ipl' (which must be below IPL_SCHED/IPL_HIGH),
 * with behavior controlled by the CPRNG_* bits in `flags'.  Sleeps for
 * memory (KM_SLEEP); never returns NULL.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	/* Sample the counter early, for the DRBG instantiation nonce. */
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	/* Scrub the seed so key material does not linger on the stack.  */
	explicit_memset(seed, 0, sizeof(seed));

	/*
	 * CPRNG_HARD (/dev/random semantics): budget one key's worth of
	 * output per seeding; otherwise no output accounting is done.
	 */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	/* Warn unless the creator explicitly tolerates a partial seed.  */
	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);

	return cprng;
}
151 
/*
 * Destroy cprng.  The caller must guarantee there are no remaining
 * users: nobody blocked in cprng_strong, no select/kqueue waiters.
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	/* Scrub generator state before freeing the memory.  */
	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
174 
/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 *
 * Returns the number of bytes actually written into buffer: at most
 * CPRNG_MAX_LEN per call (callers must loop for more), possibly fewer
 * under CPRNG_HARD's entropy budget, and zero if we gave up waiting
 * for full entropy.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Reseed opportunistically with whatever entropy exists.  */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Wait for full entropy.  Give up and return zero bytes
		 * if the caller is nonblocking, if this cprng may not
		 * sleep (no CPRNG_USE_CV), or if the sleep is
		 * interrupted by a signal (cv_wait_sig nonzero).
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/*
			 * Budget exhausted: truncate the request to what
			 * remains, reset the budget, and require a reseed
			 * before any further output.
			 */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
231 
/* kqueue(2) glue: EVFILT_READ readiness tracking for a cprng.  */
static void	filt_cprng_detach(struct knote *);
static int	filt_cprng_event(struct knote *, long);

/* NOTE(review): leading 1 is presumably the f_isfd field -- confirm.  */
static const struct filterops cprng_filtops =
	{ 1, NULL, filt_cprng_detach, filt_cprng_event };
237 
238 int
239 cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
240 {
241 
242 	switch (kn->kn_filter) {
243 	case EVFILT_READ:
244 		kn->kn_fop = &cprng_filtops;
245 		kn->kn_hook = cprng;
246 		mutex_enter(&cprng->cs_lock);
247 		SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
248 		mutex_exit(&cprng->cs_lock);
249 		return 0;
250 
251 	case EVFILT_WRITE:
252 	default:
253 		return EINVAL;
254 	}
255 }
256 
/*
 * kqueue detach: unhook the knote from the cprng's klist, under the
 * cprng lock since filt_cprng_event may run concurrently.
 */
static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}
266 
267 static int
268 filt_cprng_event(struct knote *kn, long hint)
269 {
270 	struct cprng_strong *const cprng = kn->kn_hook;
271 	int ret;
272 
273 	if (hint == NOTE_SUBMIT)
274 		KASSERT(mutex_owned(&cprng->cs_lock));
275 	else
276 		mutex_enter(&cprng->cs_lock);
277 	if (cprng->cs_ready) {
278 		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */
279 		ret = 1;
280 	} else {
281 		ret = 0;
282 	}
283 	if (hint == NOTE_SUBMIT)
284 		KASSERT(mutex_owned(&cprng->cs_lock));
285 	else
286 		mutex_exit(&cprng->cs_lock);
287 
288 	return ret;
289 }
290 
291 int
292 cprng_strong_poll(struct cprng_strong *cprng, int events)
293 {
294 	int revents;
295 
296 	if (!ISSET(events, (POLLIN | POLLRDNORM)))
297 		return 0;
298 
299 	mutex_enter(&cprng->cs_lock);
300 	if (cprng->cs_ready) {
301 		revents = (events & (POLLIN | POLLRDNORM));
302 	} else {
303 		selrecord(curlwp, &cprng->cs_selq);
304 		revents = 0;
305 	}
306 	mutex_exit(&cprng->cs_lock);
307 
308 	return revents;
309 }
310 
311 /*
312  * XXX Move nist_ctr_drbg_reseed_advised_p and
313  * nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make
314  * the NIST_CTR_DRBG structure opaque.
315  */
316 static bool
317 nist_ctr_drbg_reseed_advised_p(NIST_CTR_DRBG *drbg)
318 {
319 
320 	return (drbg->reseed_counter > (NIST_CTR_DRBG_RESEED_INTERVAL / 2));
321 }
322 
323 static bool
324 nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg)
325 {
326 
327 	return (drbg->reseed_counter >= NIST_CTR_DRBG_RESEED_INTERVAL);
328 }
329 
/*
 * Generate some data from the underlying generator.
 *
 * Caller holds cs_lock and has ensured bytes <= CPRNG_MAX_LEN.  May
 * clear cs_ready and schedule a reseed as a side effect; panics if
 * the CTR_DRBG itself fails (which the preconditions should preclude).
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	/* Fresh counter sample as additional input for this request.  */
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST CTR_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the CTR_DRBG should not fail.
	 */
	if (__predict_false(nist_ctr_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST CTR_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}
367 
/*
 * Reseed with whatever we can get from the system entropy pool right now.
 * Caller holds cs_lock.  The pool may yield only partial entropy;
 * cprng_strong_reseed_from is told which so it can update cs_ready.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	/* Scrub the stack copy of the seed.  */
	explicit_memset(seed, 0, sizeof(seed));
}
383 
/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 *
 * Caller holds cs_lock; bytes must be exactly NIST_BLOCK_KEYLEN_BYTES.
 * Wakes cv sleepers and poll/kqueue waiters on a partial-to-full
 * entropy transition; panics if the CTR_DRBG reseed itself fails.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	/* Fresh counter sample as additional reseed input.  */
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name);

#if DEBUG
	cprng_strong_rngtest(cprng);
#endif
}
429 
#if DEBUG
/*
 * Generate some output and apply a statistical RNG test to it.
 *
 * DEBUG kernels only, run after every reseed.  Caller holds cs_lock.
 * On test failure the cprng is marked not ready and a reseed is
 * scheduled; allocation failure is silently tolerated (best-effort).
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead?  */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn?  */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b),
		NULL, 0))
		panic("cprng %s: NIST CTR_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good...  */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	/* Scrub the test buffer -- it holds real generator output.  */
	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif
465 
/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.
 *
 * context is the struct cprng_strong passed to rndsink_create.  Takes
 * cs_lock itself, unlike the other reseed paths.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}
480