/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_get_pseudo_bytes() and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy high quality random number (/dev/random) requests.
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per prng key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use the SHA1
 * routines directly instead of using the k-API because we can't return any
 * error code in the /dev/urandom case and we can get an error from the k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE			20
#define	HMAC_BLOCK_SIZE			64
#define	HMAC_KEYSCHED			sha1keysched_t
#define	SET_ENCRYPT_KEY(k, s, ks)	hmac_key((k), (s), (ks))
#define	HMAC_ENCRYPT(ks, p, s, d)	hmac_encr((ks), (uint8_t *)(p), s, d)

/* HMAC-SHA1 "keyschedule" */
typedef struct sha1keysched_s {
	SHA1_CTX	ictx;
	SHA1_CTX	octx;
} sha1keysched_t;

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;			/* Number of bytes in the cache */

static kmutex_t rndpool_lock;		/* protects r/w accesses to the cache, */
					/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;			/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();
static void hmac_key(uint8_t *, size_t, void *);
static void hmac_encr(void *, uint8_t *, size_t, uint8_t *);


void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache from two unpredictable times: the high
	 * resolution time since boot, and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		return (B_TRUE);
	} else
		return (B_FALSE);
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, B_FALSE) == CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}

/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;


/*
 * This routine is called for blocking reads.
 *
 * The argument from_user_api indicates whether the caller is
 * from userland coming via the /dev/random driver.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
    boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;
		/*
		 * Typically a hardware RNG is a multi-purpose
		 * crypto card and hence we do not want to overload the card
		 * just for random numbers.
		 * The following check is to prevent
		 * a user process from hogging the hardware RNG. Note that we
		 * still use the hardware RNG from the periodically run
		 * taskq thread.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
		    kcf_limit_hwrng == 1) {
			ASSERT(is_taskq_thr == B_FALSE);
			goto try_next;
		}

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}

/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/* See comments in rngprov_getbytes() */
			if (from_user_api && kcf_limit_hwrng == 1)
				goto try_next;

			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the book keeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	if (rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE) == -1) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}
}

/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
    boolean_t from_user_api)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, from_user_api,
		    B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len,
		    from_user_api)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
    boolean_t from_user_api)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*5
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	(sizeof (kmutex_t) + 3*sizeof (uint8_t *) + sizeof (HMAC_KEYSCHED) + \
	sizeof (uint64_t) + 3*sizeof (uint32_t) + sizeof (rnd_stats_t)))

/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	HMAC_KEYSCHED	rm_ks;		/* seed */
	uint64_t	rm_counter;	/* rotating counter for extracting */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_t;

/*
 * Generate random bytes for /dev/urandom by encrypting a
 * rotating counter with a key created from bytes extracted
 * from the pool.  A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint8_t digest[HASHSIZE];

	ASSERT(mutex_owned(&rmp->rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_oblocks += nblock;
	oblocks = rmp->rm_oblocks;

	do {
		if (oblocks >= rmp->rm_olimit) {
			hrtime_t timestamp;
			uint8_t key[HMAC_KEYSIZE];

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_olimit += rmp->rm_ofuzz;
					rmp->rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes(key, HMAC_KEYSIZE,
			    ALWAYS_EXTRACT, B_FALSE);

			/* Set up key */
			SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &rmp->rm_ks);

			/* Get new counter value by encrypting timestamp */
			timestamp = gethrtime();
			HMAC_ENCRYPT(&rmp->rm_ks, &timestamp,
			    sizeof (timestamp), digest);
			rmp->rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_ofuzz = PRNG_MAXOBLOCKS/4;
			bcopy(digest, &rmp->rm_counter, sizeof (uint64_t));
			oblocks = 0;
			rmp->rm_oblocks = nblock;
		}
punt:
		/* Hash counter to produce prn stream */
		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), ptr);
		} else {
			size = min(bytes, HASHSIZE);
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), digest);
			bcopy(digest, ptr, size);
		}
		ptr += size;
		bytes -= size;
		rmp->rm_counter++;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	mutex_exit(&rmp->rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_t *rndmag;
static uint8_t	*rndbuf;
static size_t	rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus.  On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t	rndmag_threshold = 2560;
size_t	rndbuf_len = 5120;
size_t	rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_eptr) {
			rmp->rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_rptr = rmp->rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_buffer,
		    rndbuf_len);
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes above on the first call for
 * each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_t *rmp;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_t) * random_max_ncpus, KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_buffer = buf;
		rmp->rm_eptr = buf + rndbuf_len;
		rmp->rm_rptr = buf + rndbuf_len;
		rmp->rm_oblocks = 1;
	}
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the first 32 bits from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}

/*
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
	/*
	 * Sampling of rnbyte_cnt is an atomic
	 * operation. Hence we do not need any locking.
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet)
			*phpp = &rnd_pollhead;
	}
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	} else if (!kcf_rngprov_check()) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. This is
	 * acceptable since the blocking will eventually end, after the
	 * timeout has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

/* Hashing functions */

static void
hmac_key(uint8_t *key, size_t keylen, void *buf)
{
	uint32_t *ip, *op;
	uint32_t ipad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	uint32_t opad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	HASH_CTX *icontext, *ocontext;
	int i;
	int nints;

	icontext = buf;
	ocontext = (SHA1_CTX *)((uint8_t *)buf + sizeof (HASH_CTX));

	bzero((uchar_t *)ipad, HMAC_BLOCK_SIZE);
	bzero((uchar_t *)opad, HMAC_BLOCK_SIZE);
	bcopy(key, (uchar_t *)ipad, keylen);
	bcopy(key, (uchar_t *)opad, keylen);

	/*
	 * XOR key with ipad (0x36) and opad (0x5c) as defined
	 * in RFC 2104.
	 */
	ip = ipad;
	op = opad;
	nints = HMAC_BLOCK_SIZE/sizeof (uint32_t);

	for (i = 0; i < nints; i++) {
		ip[i] ^= 0x36363636;
		op[i] ^= 0x5c5c5c5c;
	}

	/* Perform hash with ipad */
	HashInit(icontext);
	HashUpdate(icontext, (uchar_t *)ipad, HMAC_BLOCK_SIZE);

	/* Perform hash with opad */
	HashInit(ocontext);
	HashUpdate(ocontext, (uchar_t *)opad, HMAC_BLOCK_SIZE);
}

static void
hmac_encr(void *ctx, uint8_t *ptr, size_t len, uint8_t *digest)
{
	HASH_CTX *saved_contexts;
	HASH_CTX icontext;
	HASH_CTX ocontext;

	saved_contexts = (HASH_CTX *)ctx;
	icontext = saved_contexts[0];
	ocontext = saved_contexts[1];

	HashUpdate(&icontext, ptr, len);
	HashFinal(digest, &icontext);

	/*
	 * Perform Hash(K XOR OPAD, DIGEST), where DIGEST is the
	 * Hash(K XOR IPAD, DATA).
	 */
	HashUpdate(&ocontext, digest, HASHSIZE);
	HashFinal(digest, &ocontext);
}


static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);

	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
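
/*
 * Illustrative sketch, not part of the original file: how a kernel consumer
 * might use the exported entry points above. The example function name, the
 * KCF_RANDOM_EXAMPLE guard and the fallback policy are hypothetical; only
 * random_add_entropy(), random_get_bytes() and random_get_pseudo_bytes()
 * come from this file.
 */
#ifdef	KCF_RANDOM_EXAMPLE
static int
example_make_session_key(uint8_t *keybuf, size_t keylen)
{
	hrtime_t ts = gethrtime();

	/*
	 * Feed a low-entropy timestamp into a provider's pool; pass 0 as
	 * the entropy estimate and ask for it to be mixed in immediately.
	 */
	(void) random_add_entropy((uint8_t *)&ts, sizeof (ts), 0);

	/*
	 * Ask for high-quality bytes first; on EAGAIN (insufficient
	 * entropy), fall back to the per-CPU pseudo-random generator,
	 * which always succeeds.
	 */
	if (random_get_bytes(keybuf, keylen) != 0)
		return (random_get_pseudo_bytes(keybuf, keylen));

	return (0);
}
#endif	/* KCF_RANDOM_EXAMPLE */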