1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * This file implements the interfaces that the /dev/random
27 * driver uses for read(2), write(2) and poll(2) on /dev/random or
28 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
29 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
30 * and random_get_bytes().
31 *
32 * We periodically collect random bits from providers which are registered
33 * with the Kernel Cryptographic Framework (kCF) as capable of random
34 * number generation. The random bits are maintained in a cache and
35 * it is used for high quality random numbers (/dev/random) requests.
36 * We pick a provider and call its SPI routine, if the cache does not have
37 * enough bytes to satisfy a request.
38 *
39 * /dev/urandom requests use a software-based generator algorithm that uses the
40 * random bits in the cache as a seed. We create one pseudo-random generator
41 * (for /dev/urandom) per possible CPU on the system, and use it,
42 * kmem-magazine-style, to avoid cache line contention.
43 *
44 * LOCKING HIERARCHY:
45 * 1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
46 * 2) rndpool_lock protects the high-quality randomness pool.
47 * It may be locked while a rmp->rm_mag.rm_lock is held.
48 *
49 * A history note: The kernel API and the software-based algorithms in this
50 * file used to be part of the /dev/random driver.
51 */
52
53 #include <sys/types.h>
54 #include <sys/conf.h>
55 #include <sys/sunddi.h>
56 #include <sys/disp.h>
57 #include <sys/modctl.h>
58 #include <sys/ddi.h>
59 #include <sys/crypto/common.h>
60 #include <sys/crypto/api.h>
61 #include <sys/crypto/impl.h>
62 #include <sys/crypto/sched_impl.h>
63 #include <sys/crypto/ioctladmin.h>
64 #include <sys/random.h>
65 #include <sys/sha1.h>
66 #include <sys/time.h>
67 #include <sys/sysmacros.h>
68 #include <sys/cpuvar.h>
69 #include <sys/taskq.h>
70 #include <rng/fips_random.h>
71
72 #define RNDPOOLSIZE 1024 /* Pool size in bytes */
73 #define MINEXTRACTBYTES 20
74 #define MAXEXTRACTBYTES 1024
75 #define PRNG_MAXOBLOCKS 1310720 /* Max output block per prng key */
76 #define TIMEOUT_INTERVAL 5 /* Periodic mixing interval in secs */
77
78 typedef enum extract_type {
79 NONBLOCK_EXTRACT,
80 BLOCKING_EXTRACT,
81 ALWAYS_EXTRACT
82 } extract_type_t;
83
84 /*
85 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
86 * routines directly instead of using k-API because we can't return any
87 * error code in /dev/urandom case and we can get an error using k-API
88 * if a mechanism is disabled.
89 */
90 #define HASHSIZE 20
91 #define HASH_CTX SHA1_CTX
92 #define HashInit(ctx) SHA1Init((ctx))
93 #define HashUpdate(ctx, p, s) SHA1Update((ctx), (p), (s))
94 #define HashFinal(d, ctx) SHA1Final((d), (ctx))
95
96 /* HMAC-SHA1 */
97 #define HMAC_KEYSIZE 20
98
99 /*
100 * Cache of random bytes implemented as a circular buffer. findex and rindex
101 * track the front and back of the circular buffer.
102 */
103 uint8_t rndpool[RNDPOOLSIZE];
104 static int findex, rindex;
105 static int rnbyte_cnt; /* Number of bytes in the cache */
106
107 static kmutex_t rndpool_lock; /* protects r/w accesses to the cache, */
108 /* and the global variables */
109 static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
110 static int num_waiters; /* #threads waiting to read from /dev/random */
111
112 static struct pollhead rnd_pollhead;
113 /* LINTED E_STATIC_UNUSED */
114 static timeout_id_t kcf_rndtimeout_id;
115 static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
116 rnd_stats_t rnd_stats;
117 static boolean_t rng_prov_found = B_TRUE;
118 static boolean_t rng_ok_to_log = B_TRUE;
119 static boolean_t rngprov_task_idle = B_TRUE;
120
121 static void rndc_addbytes(uint8_t *, size_t);
122 static void rndc_getbytes(uint8_t *ptr, size_t len);
123 static void rnd_handler(void *);
124 static void rnd_alloc_magazines();
125
126 void
kcf_rnd_init()127 kcf_rnd_init()
128 {
129 hrtime_t ts;
130 time_t now;
131
132 mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
133 cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);
134
135 /*
136 * Add bytes to the cache using
137 * . 2 unpredictable times: high resolution time since the boot-time,
138 * and the current time-of-the day.
139 * This is used only to make the timeout value in the timer
140 * unpredictable.
141 */
142 ts = gethrtime();
143 rndc_addbytes((uint8_t *)&ts, sizeof (ts));
144
145 (void) drv_getparm(TIME, &now);
146 rndc_addbytes((uint8_t *)&now, sizeof (now));
147
148 rnbyte_cnt = 0;
149 findex = rindex = 0;
150 num_waiters = 0;
151 rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);
152
153 rnd_alloc_magazines();
154 }
155
156 /*
157 * Return TRUE if at least one provider exists that can
158 * supply random numbers.
159 */
160 boolean_t
kcf_rngprov_check(void)161 kcf_rngprov_check(void)
162 {
163 int rv;
164 kcf_provider_desc_t *pd;
165
166 if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
167 NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
168 KCF_PROV_REFRELE(pd);
169 /*
170 * We logged a warning once about no provider being available
171 * and now a provider became available. So, set the flag so
172 * that we can log again if the problem recurs.
173 */
174 rng_ok_to_log = B_TRUE;
175 rng_prov_found = B_TRUE;
176 return (B_TRUE);
177 } else {
178 rng_prov_found = B_FALSE;
179 return (B_FALSE);
180 }
181 }
182
183 /*
184 * Pick a software-based provider and submit a request to seed
185 * its random number generator.
186 */
187 static void
rngprov_seed(uint8_t * buf,int len,uint_t entropy_est,uint32_t flags)188 rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
189 {
190 kcf_provider_desc_t *pd = NULL;
191
192 if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
193 CRYPTO_SUCCESS) {
194 (void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
195 entropy_est, flags, NULL);
196 KCF_PROV_REFRELE(pd);
197 }
198 }
199
200 /*
201 * This routine is called for blocking reads.
202 *
203 * The argument is_taskq_thr indicates whether the caller is
204 * the taskq thread dispatched by the timeout handler routine.
205 * In this case, we cycle through all the providers
206 * submitting a request to each provider to generate random numbers.
207 *
208 * For other cases, we pick a provider and submit a request to generate
209 * random numbers. We retry using another provider if we get an error.
210 *
211 * Returns the number of bytes that are written to 'ptr'. Returns -1
212 * if no provider is found. ptr and need are unchanged.
213 */
/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;		/* number of providers tried */
	int total_bytes = 0;		/* bytes successfully generated */
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;	/* providers already tried */

	/*
	 * kcf_get_mech_provider() skips any provider already on 'list',
	 * so each loop iteration tries a provider not yet attempted.
	 */
	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		/* Synchronous submission: the request must not be queued. */
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				/* Feed the pool; keep cycling providers. */
				rndc_addbytes(ptr, need);
			else {
				/* One successful provider is enough. */
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
268
269 static void
notify_done(void * arg,int rv)270 notify_done(void *arg, int rv)
271 {
272 uchar_t *rndbuf = arg;
273
274 if (rv == CRYPTO_SUCCESS)
275 rndc_addbytes(rndbuf, MINEXTRACTBYTES);
276
277 bzero(rndbuf, MINEXTRACTBYTES);
278 kmem_free(rndbuf, MINEXTRACTBYTES);
279 }
280
281 /*
282 * Cycle through all the providers submitting a request to each provider
283 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
284 * and ALWAYS_EXTRACT.
285 *
286 * Returns the number of bytes that are written to 'ptr'. Returns -1
287 * if no provider is found. ptr and len are unchanged.
288 */
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;		/* bytes copied from a HW provider buffer */
	uchar_t *rndbuf;	/* per-request buffer for HW providers */
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;	/* async call descriptor (HW case) */
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt ++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the book keeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				/* Surplus bytes go to the pool, not wasted. */
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				/* SW provider filled 'ptr' completely. */
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			/* Request fully satisfied. */
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
409
410 static void
rngprov_task(void * arg)411 rngprov_task(void *arg)
412 {
413 int len = (int)(uintptr_t)arg;
414 uchar_t tbuf[MAXEXTRACTBYTES];
415
416 ASSERT(len <= MAXEXTRACTBYTES);
417 (void) rngprov_getbytes(tbuf, len, B_TRUE);
418 rngprov_task_idle = B_TRUE;
419 }
420
421 /*
422 * Returns "len" random or pseudo-random bytes in *ptr.
423 * Will block if not enough random bytes are available and the
424 * call is blocking.
425 *
426 * Called with rndpool_lock held (allowing caller to do optimistic locking;
427 * releases the lock before return).
428 */
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 *
 * Returns 0 on success, EAGAIN for NONBLOCK_EXTRACT when entropy is
 * short, or EINTR if a blocking wait was interrupted by a signal.
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	size_t bytes;
	int got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break; /* No provider found */

		if (got == len)
			return (0);
		/* Partial success: take the remainder from the pool below. */
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		/*
		 * NOTE(review): rnbyte_cnt is sampled here without holding
		 * rndpool_lock -- a snapshot is acceptable for this check.
		 */
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				/* cv_wait_sig() returns 0 on a signal */
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}
519
520 int
kcf_rnd_get_bytes(uint8_t * ptr,size_t len,boolean_t noblock)521 kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
522 {
523 extract_type_t how;
524 int error;
525
526 how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
527 mutex_enter(&rndpool_lock);
528 if ((error = rnd_get_bytes(ptr, len, how)) != 0)
529 return (error);
530
531 BUMP_RND_STATS(rs_rndOut, len);
532 return (0);
533 }
534
535 /*
536 * Revisit this if the structs grow or we come up with a better way
537 * of cache-line-padding structures.
538 */
539 #define RND_CPU_CACHE_SIZE 64
540 #define RND_CPU_PAD_SIZE RND_CPU_CACHE_SIZE*6
541 #define RND_CPU_PAD (RND_CPU_PAD_SIZE - \
542 sizeof (rndmag_t))
543 /*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
545 * a per-CPU instance of the pseudo-random generator. We have it much easier
546 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
547 *
548 * Note that this usage is preemption-safe; a thread
549 * entering a critical section remembers which generator it locked
550 * and unlocks the same one; should it be preempted and wind up running on
551 * a different CPU, there will be a brief period of increased contention
552 * before it exits the critical section but nothing will melt.
553 */
/*
 * Per-CPU pseudo-random generator state; all fields are protected
 * by rm_lock.  rm_rptr walks from rm_buffer toward rm_eptr as
 * buffered output bytes are consumed.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t	*rm_buffer;	/* Start of buffer */
	uint8_t	*rm_eptr;	/* End of buffer */
	uint8_t	*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];	/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD]; /* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD]; /* prev random */
} rndmag_t;

/*
 * rndmag_t padded out to RND_CPU_PAD_SIZE so each per-CPU magazine
 * occupies its own set of cache lines (avoids false sharing).
 */
typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t	rm_pad[RND_CPU_PAD];
} rndmag_pad_t;
574
575 /*
576 * Generate random bytes for /dev/urandom by applying the
577 * FIPS 186-2 algorithm with a key created from bytes extracted
578 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
579 * is generated before a new key is obtained.
580 *
581 * Note that callers to this routine are likely to assume it can't fail.
582 *
583 * Called with rmp locked; releases lock.
584 */
585 static int
rnd_generate_pseudo_bytes(rndmag_pad_t * rmp,uint8_t * ptr,size_t len)586 rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
587 {
588 size_t bytes = len, size;
589 int nblock;
590 uint32_t oblocks;
591 uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
592 uint32_t seed[HASHSIZE/BYTES_IN_WORD];
593 int i;
594 hrtime_t timestamp;
595 uint8_t *src, *dst;
596
597 ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));
598
599 /* Nothing is being asked */
600 if (len == 0) {
601 mutex_exit(&rmp->rm_mag.rm_lock);
602 return (0);
603 }
604
605 nblock = howmany(len, HASHSIZE);
606
607 rmp->rm_mag.rm_oblocks += nblock;
608 oblocks = rmp->rm_mag.rm_oblocks;
609
610 do {
611 if (oblocks >= rmp->rm_mag.rm_olimit) {
612
613 /*
614 * Contention-avoiding rekey: see if
615 * the pool is locked, and if so, wait a bit.
616 * Do an 'exponential back-in' to ensure we don't
617 * run too long without rekey.
618 */
619 if (rmp->rm_mag.rm_ofuzz) {
620 /*
621 * Decaying exponential back-in for rekey.
622 */
623 if ((rnbyte_cnt < MINEXTRACTBYTES) ||
624 (!mutex_tryenter(&rndpool_lock))) {
625 rmp->rm_mag.rm_olimit +=
626 rmp->rm_mag.rm_ofuzz;
627 rmp->rm_mag.rm_ofuzz >>= 1;
628 goto punt;
629 }
630 } else {
631 mutex_enter(&rndpool_lock);
632 }
633
634 /* Get a new chunk of entropy */
635 (void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
636 HMAC_KEYSIZE, ALWAYS_EXTRACT);
637
638 rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
639 rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
640 oblocks = 0;
641 rmp->rm_mag.rm_oblocks = nblock;
642 }
643 punt:
644 timestamp = gethrtime();
645
646 src = (uint8_t *)×tamp;
647 dst = (uint8_t *)rmp->rm_mag.rm_seed;
648
649 for (i = 0; i < HASHSIZE; i++) {
650 dst[i] ^= src[i % sizeof (timestamp)];
651 }
652
653 bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);
654
655 fips_random_inner(rmp->rm_mag.rm_key, tempout,
656 seed);
657
658 if (bytes >= HASHSIZE) {
659 size = HASHSIZE;
660 } else {
661 size = min(bytes, HASHSIZE);
662 }
663
664 /*
665 * FIPS 140-2: Continuous RNG test - each generation
666 * of an n-bit block shall be compared with the previously
667 * generated block. Test shall fail if any two compared
668 * n-bit blocks are equal.
669 */
670 for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
671 if (tempout[i] != rmp->rm_mag.rm_previous[i])
672 break;
673 }
674 if (i == HASHSIZE/BYTES_IN_WORD) {
675 cmn_err(CE_WARN, "kcf_random: The value of 160-bit "
676 "block random bytes are same as the previous "
677 "one.\n");
678 /* discard random bytes and return error */
679 mutex_exit(&rmp->rm_mag.rm_lock);
680 return (EIO);
681 }
682
683 bcopy(tempout, rmp->rm_mag.rm_previous,
684 HASHSIZE);
685
686 bcopy(tempout, ptr, size);
687 ptr += size;
688 bytes -= size;
689 oblocks++;
690 nblock--;
691 } while (bytes > 0);
692
693 /* Zero out sensitive information */
694 bzero(seed, HASHSIZE);
695 bzero(tempout, HASHSIZE);
696 mutex_exit(&rmp->rm_mag.rm_lock);
697 return (0);
698 }
699
700 /*
701 * Per-CPU Random magazines.
702 */
703 static rndmag_pad_t *rndmag;
704 static uint8_t *rndbuf;
705 static size_t rndmag_total;
706 /*
707 * common/os/cpu.c says that platform support code can shrinkwrap
708 * max_ncpus. On the off chance that we get loaded very early, we
709 * read it exactly once, to copy it here.
710 */
711 static uint32_t random_max_ncpus = 0;
712
713 /*
714 * Boot-time tunables, for experimentation.
715 */
716 size_t rndmag_threshold = 2560;
717 size_t rndbuf_len = 5120;
718 size_t rndmag_size = 1280;
719
720
/*
 * Driver entry point for /dev/urandom reads.  Serves 'len' bytes into
 * 'ptr' from the current CPU's magazine; small requests come from the
 * per-magazine buffer, large ones go straight to the generator.
 * Returns 0 on success or the error from rnd_generate_pseudo_bytes().
 */
int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		/*
		 * NOTE(review): the thread may migrate CPUs after this
		 * index read; that only causes brief extra contention on
		 * another CPU's magazine, never incorrect behavior.
		 */
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			/* rnd_generate_pseudo_bytes() releases rm_lock. */
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			/* Enough buffered bytes; consume and return. */
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}
772
773 /*
774 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
775 * little memory on big systems that don't have the full set installed.
776 * See above; "empty" means "rptr equal to eptr"; this will trigger the
777 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
778 *
779 * TODO: make rndmag_size tunable at run time!
780 */
/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_pad_t *rmp;
	int i;
	uint8_t discard_buf[HASHSIZE];

	/* Round buffer/magazine sizes up to hash-block/cache-line units. */
	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	/* Snapshot max_ncpus exactly once (see comment above). */
	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		/* Carve this CPU's slice out of the shared buffer. */
		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;

		mutex_enter(&rndpool_lock);
		/*
		 * FIPS 140-2: the first n-bit (n > 15) block generated
		 * after power-up, initialization, or reset shall not
		 * be used, but shall be saved for comparison.
		 */
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
	}
}
833
834 void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)835 kcf_rnd_schedule_timeout(boolean_t do_mech2id)
836 {
837 clock_t ut; /* time in microseconds */
838
839 if (do_mech2id)
840 rngmech_type = crypto_mech2id(SUN_RANDOM);
841
842 /*
843 * The new timeout value is taken from the buffer of random bytes.
844 * We're merely reading the first 32 bits from the buffer here, not
845 * consuming any random bytes.
846 * The timeout multiplier value is a random value between 0.5 sec and
847 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
848 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
849 */
850 ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
851 kcf_rndtimeout_id = timeout(rnd_handler, NULL,
852 TIMEOUT_INTERVAL * drv_usectohz(ut));
853 }
854
855 /*
856 * Called from the driver for a poll on /dev/random
857 * . POLLOUT always succeeds.
858 * . POLLIN and POLLRDNORM will block until a
859 * minimum amount of entropy is available.
860 *
861 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
862 * will block. When enough random bytes are available, later, the timeout
863 * handler routine will issue the pollwakeup() calls.
864 */
865 void
kcf_rnd_chpoll(short events,int anyyet,short * reventsp,struct pollhead ** phpp)866 kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
867 struct pollhead **phpp)
868 {
869 *reventsp = events & POLLOUT;
870
871 if (events & (POLLIN | POLLRDNORM)) {
872 /*
873 * Sampling of rnbyte_cnt is an atomic
874 * operation. Hence we do not need any locking.
875 */
876 if (rnbyte_cnt >= MINEXTRACTBYTES)
877 *reventsp |= (events & (POLLIN | POLLRDNORM));
878 }
879
880 if (*reventsp == 0 && !anyyet)
881 *phpp = &rnd_pollhead;
882 }
883
884 /*ARGSUSED*/
/*
 * Periodic timeout handler: logs a one-shot warning if no RNG provider
 * exists, dispatches a taskq job to replenish the pool, wakes readers
 * and pollers, and re-arms itself via kcf_rnd_schedule_timeout().
 */
/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	/*
	 * Only one thread gets to set rngprov_task_idle at a given point
	 * of time and the order of the writes is defined. Also, it is OK
	 * if we read an older value of it and skip the dispatch once
	 * since we will get the correct value during the next time here.
	 * So, no locking is needed here.
	 */
	if (len > 0 && rngprov_task_idle) {
		rngprov_task_idle = B_FALSE;

		/*
		 * It is OK if taskq_dispatch fails here. We will retry
		 * the next time around. Meanwhile, a thread doing a
		 * read() will go to the provider directly, if the
		 * cache becomes empty.
		 */
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP | TQ_NOQUEUE) == 0) {
			rngprov_task_idle = B_TRUE;
		}
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rndchpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	/* Re-arm the timer with a fresh randomized interval. */
	kcf_rnd_schedule_timeout(B_FALSE);
}
950
951 static void
rndc_addbytes(uint8_t * ptr,size_t len)952 rndc_addbytes(uint8_t *ptr, size_t len)
953 {
954 ASSERT(ptr != NULL && len > 0);
955 ASSERT(rnbyte_cnt <= RNDPOOLSIZE);
956
957 mutex_enter(&rndpool_lock);
958 while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
959 rndpool[rindex] ^= *ptr;
960 ptr++; len--;
961 rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
962 rnbyte_cnt++;
963 }
964
965 /* Handle buffer full case */
966 while (len > 0) {
967 rndpool[rindex] ^= *ptr;
968 ptr++; len--;
969 findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
970 }
971 mutex_exit(&rndpool_lock);
972 }
973
974 /*
975 * Caller should check len <= rnbyte_cnt under the
976 * rndpool_lock before calling.
977 */
978 static void
rndc_getbytes(uint8_t * ptr,size_t len)979 rndc_getbytes(uint8_t *ptr, size_t len)
980 {
981 ASSERT(MUTEX_HELD(&rndpool_lock));
982 ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);
983
984 BUMP_RND_STATS(rs_rndcOut, len);
985
986 while (len > 0) {
987 *ptr = rndpool[findex];
988 ptr++; len--;
989 findex = (findex + 1) & (RNDPOOLSIZE - 1);
990 rnbyte_cnt--;
991 }
992 }
993
994 /* Random number exported entry points */
995
996 /*
997 * Mix the supplied bytes into the entropy pool of a kCF
998 * RNG provider.
999 */
1000 int
random_add_pseudo_entropy(uint8_t * ptr,size_t len,uint_t entropy_est)1001 random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
1002 {
1003 if (len < 1)
1004 return (-1);
1005
1006 rngprov_seed(ptr, len, entropy_est, 0);
1007
1008 return (0);
1009 }
1010
1011 /*
1012 * Mix the supplied bytes into the entropy pool of a kCF
1013 * RNG provider. Mix immediately.
1014 */
1015 int
random_add_entropy(uint8_t * ptr,size_t len,uint_t entropy_est)1016 random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
1017 {
1018 if (len < 1)
1019 return (-1);
1020
1021 rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);
1022
1023 return (0);
1024 }
1025
1026 /*
1027 * Get bytes from the /dev/urandom generator. This function
1028 * always succeeds. Returns 0.
1029 */
1030 int
random_get_pseudo_bytes(uint8_t * ptr,size_t len)1031 random_get_pseudo_bytes(uint8_t *ptr, size_t len)
1032 {
1033 ASSERT(!mutex_owned(&rndpool_lock));
1034
1035 if (len < 1)
1036 return (0);
1037 return (kcf_rnd_get_pseudo_bytes(ptr, len));
1038 }
1039
1040 /*
1041 * Get bytes from the /dev/random generator. Returns 0
1042 * on success. Returns EAGAIN if there is insufficient entropy.
1043 */
1044 int
random_get_bytes(uint8_t * ptr,size_t len)1045 random_get_bytes(uint8_t *ptr, size_t len)
1046 {
1047 ASSERT(!mutex_owned(&rndpool_lock));
1048
1049 if (len < 1)
1050 return (0);
1051 return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
1052 }
1053
1054 /*
1055 * The two functions below are identical to random_get_pseudo_bytes() and
1056 * random_get_bytes_fips, this function is called for consumers that want
1057 * FIPS 140-2. This function waits until the FIPS boundary can be verified.
1058 */
1059
1060 /*
1061 * Get bytes from the /dev/urandom generator. This function
1062 * always succeeds. Returns 0.
1063 */
1064 int
random_get_pseudo_bytes_fips140(uint8_t * ptr,size_t len)1065 random_get_pseudo_bytes_fips140(uint8_t *ptr, size_t len)
1066 {
1067 ASSERT(!mutex_owned(&rndpool_lock));
1068
1069 mutex_enter(&fips140_mode_lock);
1070 while (global_fips140_mode < FIPS140_MODE_ENABLED) {
1071 cv_wait(&cv_fips140, &fips140_mode_lock);
1072 }
1073 mutex_exit(&fips140_mode_lock);
1074
1075 if (len < 1)
1076 return (0);
1077 return (kcf_rnd_get_pseudo_bytes(ptr, len));
1078 }
1079
1080 /*
1081 * Get bytes from the /dev/random generator. Returns 0
1082 * on success. Returns EAGAIN if there is insufficient entropy.
1083 */
1084 int
random_get_bytes_fips140(uint8_t * ptr,size_t len)1085 random_get_bytes_fips140(uint8_t *ptr, size_t len)
1086 {
1087 ASSERT(!mutex_owned(&rndpool_lock));
1088
1089 mutex_enter(&fips140_mode_lock);
1090 while (global_fips140_mode < FIPS140_MODE_ENABLED) {
1091 cv_wait(&cv_fips140, &fips140_mode_lock);
1092 }
1093 mutex_exit(&fips140_mode_lock);
1094
1095 if (len < 1)
1096 return (0);
1097 return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
1098 }
1099