/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_get_pseudo_bytes() and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy high quality random number (/dev/random) requests.
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses
 * the random bits in the cache as a seed. We create one pseudo-random
 * generator (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */
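
/*
 * Illustrative sketch (not part of this module): the userland view of the
 * read(2)/poll(2) interfaces described above.  This is ordinary application
 * code shown only to make the driver's blocking and poll semantics concrete;
 * the function name is hypothetical and a real program would check return
 * values and handle short reads.
 */
#if 0	/* example only; never compiled */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int
example_read_dev_random(void *buf, size_t len)
{
	struct pollfd pfd;
	int fd = open("/dev/random", O_RDONLY);

	if (fd < 0)
		return (-1);

	pfd.fd = fd;
	pfd.events = POLLIN | POLLRDNORM;

	/* Block until the driver reports enough accumulated randomness. */
	(void) poll(&pfd, 1, -1);
	(void) read(fd, buf, len);
	(void) close(fd);
	return (0);
}
#endif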

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>

#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per prng key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */

typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE			20
#define	HMAC_BLOCK_SIZE			64
#define	HMAC_KEYSCHED			sha1keysched_t
#define	SET_ENCRYPT_KEY(k, s, ks)	hmac_key((k), (s), (ks))
#define	HMAC_ENCRYPT(ks, p, s, d)	hmac_encr((ks), (uint8_t *)(p), s, d)

/* HMAC-SHA1 "keyschedule" */
typedef struct sha1keysched_s {
	SHA1_CTX	ictx;
	SHA1_CTX	octx;
} sha1keysched_t;
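
/*
 * Illustrative sketch (not part of this file's logic): how the macros and
 * "keyschedule" above fit together.  hmac_key() precomputes the RFC 2104
 * inner and outer SHA1 contexts from a key, and hmac_encr() finishes them
 * over the supplied data, producing one HASHSIZE-byte output block per call.
 * The function name and the all-zero key below are hypothetical.
 */
#if 0	/* example only; never compiled */
static void
example_hmac_block(uint8_t *out)	/* out must hold HASHSIZE bytes */
{
	HMAC_KEYSCHED ks;
	uint8_t key[HMAC_KEYSIZE] = { 0 };	/* a real key comes from the pool */
	uint64_t counter = 0;

	SET_ENCRYPT_KEY(key, sizeof (key), &ks);
	HMAC_ENCRYPT(&ks, &counter, sizeof (counter), out);
}
#endif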

/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */

static kmutex_t rndpool_lock;	/* protects r/w accesses to the cache, */
				/* and the global variables */
static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;

static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines();
static void hmac_key(uint8_t *, size_t, void *);
static void hmac_encr(void *, uint8_t *, size_t, uint8_t *);


void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using
	 * . 2 unpredictable times: high resolution time since the boot-time,
	 * and the current time-of-the day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;
	rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);

	rnd_alloc_magazines();
}
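
/*
 * Illustrative sketch (not part of this file's logic): the pool indices
 * above wrap by masking with (RNDPOOLSIZE - 1), as done later in
 * rndc_addbytes() and rndc_getbytes().  That substitute for a modulo
 * operation works only because RNDPOOLSIZE is a power of two.  The
 * function name is hypothetical.
 */
#if 0	/* example only; never compiled */
static int
example_advance_index(int index)
{
	/* 1023 + 1 wraps back to 0; any other index simply increments. */
	return ((index + 1) & (RNDPOOLSIZE - 1));
}
#endif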

/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		return (B_TRUE);
	} else
		return (B_FALSE);
}

/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len)
{
	kcf_provider_desc_t *pd = NULL;
	kcf_req_params_t params;

	if (kcf_get_sw_prov(rngmech_type, &pd, B_FALSE) == CRYPTO_SUCCESS) {
		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_SEED,
		    pd->pd_sid, buf, len);
		(void) kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		KCF_PROV_REFRELE(pd);
	}
}

/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;


/*
 * This routine is called for blocking reads.
 *
 * The argument from_user_api indicates whether the caller is
 * from userland coming via the /dev/random driver.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
    boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt++;
		/*
		 * Typically a hardware RNG is a multi-purpose
		 * crypto card and hence we do not want to overload the card
		 * just for random numbers. The following check is to prevent
		 * a user process from hogging the hardware RNG. Note that we
		 * still use the hardware RNG from the periodically run
		 * taskq thread.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
		    kcf_limit_hwrng == 1) {
			ASSERT(is_taskq_thr == B_FALSE);
			goto try_next;
		}

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}

/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/* See comments in rngprov_getbytes() */
			if (from_user_api && kcf_limit_hwrng == 1)
				goto try_next;

			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the book keeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
try_next:
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		return (-1);
	}

	return (total_bytes);
}

static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	if (rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE) == -1) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}
}

/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
    boolean_t from_user_api)
{
	int bytes;
	size_t got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, from_user_api,
		    B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len,
		    from_user_api)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}

int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
    boolean_t from_user_api)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}

/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*5
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	(sizeof (kmutex_t) + 3*sizeof (uint8_t *) + sizeof (HMAC_KEYSCHED) + \
	sizeof (uint64_t) + 3*sizeof (uint32_t) + sizeof (rnd_stats_t)))

/*
 * Per-CPU random state.  Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator.  We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	HMAC_KEYSCHED	rm_ks;		/* seed */
	uint64_t	rm_counter;	/* rotating counter for extracting */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_t;

/*
 * Generate random bytes for /dev/urandom by encrypting a
 * rotating counter with a key created from bytes extracted
 * from the pool.  A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len;
	int nblock, size;
	uint32_t oblocks;
	uint8_t digest[HASHSIZE];

	ASSERT(mutex_owned(&rmp->rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_oblocks += nblock;
	oblocks = rmp->rm_oblocks;

	do {
		if (oblocks >= rmp->rm_olimit) {
			hrtime_t timestamp;
			uint8_t key[HMAC_KEYSIZE];

			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_olimit += rmp->rm_ofuzz;
					rmp->rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes(key, HMAC_KEYSIZE,
			    ALWAYS_EXTRACT, B_FALSE);

			/* Set up key */
			SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &rmp->rm_ks);

			/* Get new counter value by encrypting timestamp */
			timestamp = gethrtime();
			HMAC_ENCRYPT(&rmp->rm_ks, &timestamp,
			    sizeof (timestamp), digest);
			rmp->rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_ofuzz = PRNG_MAXOBLOCKS/4;
			bcopy(digest, &rmp->rm_counter, sizeof (uint64_t));
			oblocks = 0;
			rmp->rm_oblocks = nblock;
		}
punt:
		/* Hash counter to produce prn stream */
		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), ptr);
		} else {
			size = min(bytes, HASHSIZE);
			HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
			    sizeof (rmp->rm_counter), digest);
			bcopy(digest, ptr, size);
		}
		ptr += size;
		bytes -= size;
		rmp->rm_counter++;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	mutex_exit(&rmp->rm_lock);
	return (0);
}

/*
 * Per-CPU Random magazines.
 */
static rndmag_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus.  On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t rndmag_threshold = 64;
size_t rndbuf_len = 1280;
size_t rndmag_size = 1280;


int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_eptr) {
			rmp->rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_rptr = rmp->rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_buffer,
		    rndbuf_len);
	}
}

/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_t *rmp;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_t) * random_max_ncpus, KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		uint8_t *buf;

		rmp = &rndmag[i];
		mutex_init(&rmp->rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_buffer = buf;
		rmp->rm_eptr = buf + rndbuf_len;
		rmp->rm_rptr = buf + rndbuf_len;
		rmp->rm_oblocks = 1;
	}
}

void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
	clock_t ut;	/* time in microseconds */

	if (do_mech2id)
		rngmech_type = crypto_mech2id(SUN_RANDOM);

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading the first 32 bits from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}
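
/*
 * Illustrative sketch (not part of this file's logic): a worked instance of
 * the timeout computation above, using a hypothetical pool byte value.  The
 * function name is hypothetical.
 */
#if 0	/* example only; never compiled */
static clock_t
example_timeout_usecs(void)
{
	uint8_t b = 0x80;	/* sample value of rndpool[findex] */

	/*
	 * 500000 + ((0x80 << 12) & 0xFF000) == 1024288 microseconds, so with
	 * TIMEOUT_INTERVAL == 5 the timeout fires after roughly 5.1 seconds.
	 * The extremes are 2.5 sec (b == 0x00) and about 7.7 sec (b == 0xFF).
	 */
	return (500000 + (clock_t)((((uint32_t)b) << 12) & 0xFF000));
}
#endif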

/*
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
	/*
	 * Sampling of rnbyte_cnt is an atomic
	 * operation. Hence we do not need any locking.
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet)
			*phpp = &rnd_pollhead;
	}
}

/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (num_waiters > 0)
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	if (len > 0) {
		(void) taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP);
	} else if (!kcf_rngprov_check()) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rndchpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	kcf_rnd_schedule_timeout(B_FALSE);
}

/* Hashing functions */

static void
hmac_key(uint8_t *key, size_t keylen, void *buf)
{
	uint32_t *ip, *op;
	uint32_t ipad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	uint32_t opad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
	HASH_CTX *icontext, *ocontext;
	int i;
	int nints;

	icontext = buf;
	ocontext = (SHA1_CTX *)((uint8_t *)buf + sizeof (HASH_CTX));

	bzero((uchar_t *)ipad, HMAC_BLOCK_SIZE);
	bzero((uchar_t *)opad, HMAC_BLOCK_SIZE);
	bcopy(key, (uchar_t *)ipad, keylen);
	bcopy(key, (uchar_t *)opad, keylen);

	/*
	 * XOR key with ipad (0x36) and opad (0x5c) as defined
	 * in RFC 2104.
	 */
	ip = ipad;
	op = opad;
	nints = HMAC_BLOCK_SIZE/sizeof (uint32_t);

	for (i = 0; i < nints; i++) {
		ip[i] ^= 0x36363636;
		op[i] ^= 0x5c5c5c5c;
	}

	/* Perform hash with ipad */
	HashInit(icontext);
	HashUpdate(icontext, (uchar_t *)ipad, HMAC_BLOCK_SIZE);

	/* Perform hash with opad */
	HashInit(ocontext);
	HashUpdate(ocontext, (uchar_t *)opad, HMAC_BLOCK_SIZE);
}

static void
hmac_encr(void *ctx, uint8_t *ptr, size_t len, uint8_t *digest)
{
	HASH_CTX *saved_contexts;
	HASH_CTX icontext;
	HASH_CTX ocontext;

	saved_contexts = (HASH_CTX *)ctx;
	icontext = saved_contexts[0];
	ocontext = saved_contexts[1];

	HashUpdate(&icontext, ptr, len);
	HashFinal(digest, &icontext);

	/*
	 * Perform Hash(K XOR OPAD, DIGEST), where DIGEST is the
	 * Hash(K XOR IPAD, DATA).
	 */
	HashUpdate(&ocontext, digest, HASHSIZE);
	HashFinal(digest, &ocontext);
}


static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}

/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}

/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
/* ARGSUSED */
int
random_add_entropy(uint8_t *ptr, size_t len, uint16_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len);

	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
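
/*
 * Illustrative sketch (not part of this file): a minimal in-kernel consumer
 * of the three exported entry points above.  random_get_bytes() can return
 * EAGAIN when high-quality entropy is short, while random_get_pseudo_bytes()
 * always succeeds; falling back from one to the other, as shown here, is one
 * plausible caller policy rather than anything this file mandates.  The
 * function name is hypothetical.
 */
#if 0	/* example only; never compiled */
static void
example_kernel_consumer(void)
{
	uint8_t iv[16];
	hrtime_t ts = gethrtime();

	/* Mix some caller-supplied bytes into a provider's entropy pool. */
	(void) random_add_entropy((uint8_t *)&ts, sizeof (ts), 0);

	/* Prefer high-quality bytes; fall back to the pseudo-random pool. */
	if (random_get_bytes(iv, sizeof (iv)) != 0)
		(void) random_get_pseudo_bytes(iv, sizeof (iv));
}
#endif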